Merge branch 'master' into feature/savestates-2

commit da3ab3d56e
@@ -1,7 +1,13 @@
 #!/bin/bash -e
 
 # Setup RC file for tx
-echo $'[https://www.transifex.com]\nhostname = https://www.transifex.com\nusername = api\npassword = '"$TRANSIFEX_API_TOKEN"$'\n' > ~/.transifexrc
+cat << EOF > ~/.transifexrc
+[https://www.transifex.com]
+hostname = https://www.transifex.com
+username = api
+password = $TRANSIFEX_API_TOKEN
+EOF
+
 
 
 set -x
@@ -40,6 +40,8 @@ CMAKE_DEPENDENT_OPTION(COMPILE_WITH_DWARF "Add DWARF debugging information" ON "
 
 option(USE_SYSTEM_BOOST "Use the system Boost libs (instead of the bundled ones)" OFF)
 
+CMAKE_DEPENDENT_OPTION(ENABLE_FDK "Use FDK AAC decoder" OFF "NOT ENABLE_FFMPEG_AUDIO_DECODER;NOT ENABLE_MF" OFF)
+
 if(NOT EXISTS ${PROJECT_SOURCE_DIR}/.git/hooks/pre-commit)
     message(STATUS "Copying pre-commit hook")
     file(COPY hooks/pre-commit
@@ -218,6 +220,12 @@ if (ENABLE_FFMPEG_VIDEO_DUMPER)
     add_definitions(-DENABLE_FFMPEG_VIDEO_DUMPER)
 endif()
 
+if (ENABLE_FDK)
+    find_library(FDK_AAC fdk-aac DOC "The path to fdk_aac library")
+    if(FDK_AAC STREQUAL "FDK_AAC-NOTFOUND")
+        message(FATAL_ERROR "fdk_aac library not found.")
+    endif()
+endif()
 # Platform-specific library requirements
 # ======================================
 
@@ -2,7 +2,7 @@
 
 Citra
 ==============
-[![Travis CI Build Status](https://travis-ci.org/citra-emu/citra.svg?branch=master)](https://travis-ci.org/citra-emu/citra)
+[![Travis CI Build Status](https://travis-ci.com/citra-emu/citra.svg?branch=master)](https://travis-ci.com/citra-emu/citra)
 [![AppVeyor CI Build Status](https://ci.appveyor.com/api/projects/status/sdf1o4kh3g1e68m9?svg=true)](https://ci.appveyor.com/project/bunnei/citra)
 [![Bitrise CI Build Status](https://app.bitrise.io/app/4ccd8e5720f0d13b/status.svg?token=H32TmbCwxb3OQ-M66KbAyw&branch=master)](https://app.bitrise.io/app/4ccd8e5720f0d13b)
 
@@ -16,13 +16,13 @@ Check out our [website](https://citra-emu.org/)!
 
 Need help? Check out our [asking for help](https://citra-emu.org/help/reference/asking/) guide.
 
-For development discussion, please join us at #citra-dev on freenode.
+For development discussion, please join us on our [Discord server](https://citra-emu.org/discord/) or at #citra-dev on freenode.
 
 ### Development
 
 Most of the development happens on GitHub. It's also where [our central repository](https://github.com/citra-emu/citra) is hosted.
 
-If you want to contribute please take a look at the [Contributor's Guide](https://github.com/citra-emu/citra/wiki/Contributing) and [Developer Information](https://github.com/citra-emu/citra/wiki/Developer-Information). You should as well contact any of the developers in the forum in order to know about the current state of the emulator because the [TODO list](https://docs.google.com/document/d/1SWIop0uBI9IW8VGg97TAtoT_CHNoP42FzYmvG1F4QDA) isn't maintained anymore.
+If you want to contribute please take a look at the [Contributor's Guide](https://github.com/citra-emu/citra/wiki/Contributing) and [Developer Information](https://github.com/citra-emu/citra/wiki/Developer-Information). You should also contact any of the developers in the forum in order to know about the current state of the emulator because the [TODO list](https://docs.google.com/document/d/1SWIop0uBI9IW8VGg97TAtoT_CHNoP42FzYmvG1F4QDA) isn't maintained anymore.
 
 If you want to contribute to the user interface translation, please check out the [citra project on transifex](https://www.transifex.com/citra/citra). We centralize the translation work there, and periodically upstream translations.
 

@@ -39,6 +39,5 @@ We happily accept monetary donations or donated games and hardware. Please see o
 * 3DS games for testing
 * Any equipment required for homebrew
 * Infrastructure setup
 * Eventually 3D displays to get proper 3D output working
 
-We also more than gladly accept used 3DS consoles, preferably ones with firmware 4.5 or lower! If you would like to give yours away, don't hesitate to join our IRC channel #citra on [Freenode](http://webchat.freenode.net/?channels=citra) and talk to neobrain or bunnei. Mind you, IRC is slow-paced, so it might be a while until people reply. If you're in a hurry you can just leave contact details in the channel or via private message and we'll get back to you.
+We also more than gladly accept used 3DS consoles! If you would like to give yours away, don't hesitate to join our [Discord server](https://citra-emu.org/discord/) and talk to bunnei.
@@ -52,7 +52,7 @@ workflows:
 sudo apt remove cmake -y
 sudo apt purge --auto-remove cmake -y
 sudo apt install ninja-build -y
-version=3.8
+version=3.10
 build=2
 mkdir ~/temp
 cd ~/temp

@@ -97,7 +97,7 @@ workflows:
 sudo apt remove cmake -y
 sudo apt purge --auto-remove cmake -y
 sudo apt install ninja-build -y
-version=3.8
+version=3.10
 build=2
 mkdir ~/temp
 cd ~/temp
@@ -115,9 +115,14 @@ if (ENABLE_WEB_SERVICE)
     # lurlparser
     add_subdirectory(lurlparser EXCLUDE_FROM_ALL)
 
+    if(ANDROID)
+        add_subdirectory(android-ifaddrs)
+    endif()
+
     # httplib
     add_library(httplib INTERFACE)
     target_include_directories(httplib INTERFACE ./httplib)
+    target_compile_options(httplib INTERFACE -DCPPHTTPLIB_OPENSSL_SUPPORT)
 
     # cpp-jwt
     add_library(cpp-jwt INTERFACE)
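The `-DCPPHTTPLIB_OPENSSL_SUPPORT` definition added here is what lets the bundled cpp-httplib issue HTTPS requests (and is why core later links against LibreSSL's `OPENSSL_LIBS`). A hedged sketch of what that enables, using cpp-httplib's SSL client roughly as it existed in the vendored revision; the host and path are placeholders:

```cpp
#define CPPHTTPLIB_OPENSSL_SUPPORT // normally injected by the CMake option above
#include <httplib.h>

// Minimal HTTPS GET through cpp-httplib; only builds when the OpenSSL
// support macro is defined and an SSL library is linked in.
bool FetchExample() {
    httplib::SSLClient client("example.org", 443);
    auto response = client.Get("/");
    return response && response->status == 200;
}
```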
@@ -0,0 +1,8 @@
+add_library(ifaddrs
+    ifaddrs.c
+    ifaddrs.h
+)
+
+create_target_directory_groups(ifaddrs)
+
+target_include_directories(ifaddrs INTERFACE ${CMAKE_CURRENT_SOURCE_DIR})
@ -0,0 +1,600 @@
|
|||
/*
|
||||
Copyright (c) 2013, Kenneth MacKay
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification,
|
||||
are permitted provided that the following conditions are met:
|
||||
* Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
|
||||
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
||||
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include "ifaddrs.h"
|
||||
|
||||
#include <string.h>
|
||||
#include <stdlib.h>
|
||||
#include <errno.h>
|
||||
#include <unistd.h>
|
||||
#include <sys/socket.h>
|
||||
#include <net/if_arp.h>
|
||||
#include <netinet/in.h>
|
||||
#include <linux/netlink.h>
|
||||
#include <linux/rtnetlink.h>
|
||||
|
||||
typedef struct NetlinkList
|
||||
{
|
||||
struct NetlinkList *m_next;
|
||||
struct nlmsghdr *m_data;
|
||||
unsigned int m_size;
|
||||
} NetlinkList;
|
||||
|
||||
static int netlink_socket(void)
|
||||
{
|
||||
int l_socket = socket(PF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
|
||||
if(l_socket < 0)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
struct sockaddr_nl l_addr;
|
||||
memset(&l_addr, 0, sizeof(l_addr));
|
||||
l_addr.nl_family = AF_NETLINK;
|
||||
if(bind(l_socket, (struct sockaddr *)&l_addr, sizeof(l_addr)) < 0)
|
||||
{
|
||||
close(l_socket);
|
||||
return -1;
|
||||
}
|
||||
|
||||
return l_socket;
|
||||
}
|
||||
|
||||
static int netlink_send(int p_socket, int p_request)
|
||||
{
|
||||
char l_buffer[NLMSG_ALIGN(sizeof(struct nlmsghdr)) + NLMSG_ALIGN(sizeof(struct rtgenmsg))];
|
||||
memset(l_buffer, 0, sizeof(l_buffer));
|
||||
struct nlmsghdr *l_hdr = (struct nlmsghdr *)l_buffer;
|
||||
struct rtgenmsg *l_msg = (struct rtgenmsg *)NLMSG_DATA(l_hdr);
|
||||
|
||||
l_hdr->nlmsg_len = NLMSG_LENGTH(sizeof(*l_msg));
|
||||
l_hdr->nlmsg_type = p_request;
|
||||
l_hdr->nlmsg_flags = NLM_F_ROOT | NLM_F_MATCH | NLM_F_REQUEST;
|
||||
l_hdr->nlmsg_pid = 0;
|
||||
l_hdr->nlmsg_seq = p_socket;
|
||||
l_msg->rtgen_family = AF_UNSPEC;
|
||||
|
||||
struct sockaddr_nl l_addr;
|
||||
memset(&l_addr, 0, sizeof(l_addr));
|
||||
l_addr.nl_family = AF_NETLINK;
|
||||
return (sendto(p_socket, l_hdr, l_hdr->nlmsg_len, 0, (struct sockaddr *)&l_addr, sizeof(l_addr)));
|
||||
}
|
||||
|
||||
static int netlink_recv(int p_socket, void *p_buffer, size_t p_len)
|
||||
{
|
||||
struct msghdr l_msg;
|
||||
struct iovec l_iov = { p_buffer, p_len };
|
||||
struct sockaddr_nl l_addr;
|
||||
int l_result;
|
||||
|
||||
for(;;)
|
||||
{
|
||||
l_msg.msg_name = (void *)&l_addr;
|
||||
l_msg.msg_namelen = sizeof(l_addr);
|
||||
l_msg.msg_iov = &l_iov;
|
||||
l_msg.msg_iovlen = 1;
|
||||
l_msg.msg_control = NULL;
|
||||
l_msg.msg_controllen = 0;
|
||||
l_msg.msg_flags = 0;
|
||||
int l_result = recvmsg(p_socket, &l_msg, 0);
|
||||
|
||||
if(l_result < 0)
|
||||
{
|
||||
if(errno == EINTR)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
return -2;
|
||||
}
|
||||
|
||||
if(l_msg.msg_flags & MSG_TRUNC)
|
||||
{ // buffer was too small
|
||||
return -1;
|
||||
}
|
||||
return l_result;
|
||||
}
|
||||
}
|
||||
|
||||
static struct nlmsghdr *getNetlinkResponse(int p_socket, int *p_size, int *p_done)
|
||||
{
|
||||
size_t l_size = 4096;
|
||||
void *l_buffer = NULL;
|
||||
|
||||
for(;;)
|
||||
{
|
||||
free(l_buffer);
|
||||
l_buffer = malloc(l_size);
|
||||
|
||||
int l_read = netlink_recv(p_socket, l_buffer, l_size);
|
||||
*p_size = l_read;
|
||||
if(l_read == -2)
|
||||
{
|
||||
free(l_buffer);
|
||||
return NULL;
|
||||
}
|
||||
if(l_read >= 0)
|
||||
{
|
||||
pid_t l_pid = getpid();
|
||||
struct nlmsghdr *l_hdr;
|
||||
for(l_hdr = (struct nlmsghdr *)l_buffer; NLMSG_OK(l_hdr, (unsigned int)l_read); l_hdr = (struct nlmsghdr *)NLMSG_NEXT(l_hdr, l_read))
|
||||
{
|
||||
if((pid_t)l_hdr->nlmsg_pid != l_pid || (int)l_hdr->nlmsg_seq != p_socket)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
if(l_hdr->nlmsg_type == NLMSG_DONE)
|
||||
{
|
||||
*p_done = 1;
|
||||
break;
|
||||
}
|
||||
|
||||
if(l_hdr->nlmsg_type == NLMSG_ERROR)
|
||||
{
|
||||
free(l_buffer);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
return l_buffer;
|
||||
}
|
||||
|
||||
l_size *= 2;
|
||||
}
|
||||
}
|
||||
|
||||
static NetlinkList *newListItem(struct nlmsghdr *p_data, unsigned int p_size)
|
||||
{
|
||||
NetlinkList *l_item = malloc(sizeof(NetlinkList));
|
||||
l_item->m_next = NULL;
|
||||
l_item->m_data = p_data;
|
||||
l_item->m_size = p_size;
|
||||
return l_item;
|
||||
}
|
||||
|
||||
static void freeResultList(NetlinkList *p_list)
|
||||
{
|
||||
NetlinkList *l_cur;
|
||||
while(p_list)
|
||||
{
|
||||
l_cur = p_list;
|
||||
p_list = p_list->m_next;
|
||||
free(l_cur->m_data);
|
||||
free(l_cur);
|
||||
}
|
||||
}
|
||||
|
||||
static NetlinkList *getResultList(int p_socket, int p_request)
|
||||
{
|
||||
if(netlink_send(p_socket, p_request) < 0)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
NetlinkList *l_list = NULL;
|
||||
NetlinkList *l_end = NULL;
|
||||
int l_size;
|
||||
int l_done = 0;
|
||||
while(!l_done)
|
||||
{
|
||||
struct nlmsghdr *l_hdr = getNetlinkResponse(p_socket, &l_size, &l_done);
|
||||
if(!l_hdr)
|
||||
{ // error
|
||||
freeResultList(l_list);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
NetlinkList *l_item = newListItem(l_hdr, l_size);
|
||||
if(!l_list)
|
||||
{
|
||||
l_list = l_item;
|
||||
}
|
||||
else
|
||||
{
|
||||
l_end->m_next = l_item;
|
||||
}
|
||||
l_end = l_item;
|
||||
}
|
||||
return l_list;
|
||||
}
|
||||
|
||||
static size_t maxSize(size_t a, size_t b)
|
||||
{
|
||||
return (a > b ? a : b);
|
||||
}
|
||||
|
||||
static size_t calcAddrLen(sa_family_t p_family, int p_dataSize)
|
||||
{
|
||||
switch(p_family)
|
||||
{
|
||||
case AF_INET:
|
||||
return sizeof(struct sockaddr_in);
|
||||
case AF_INET6:
|
||||
return sizeof(struct sockaddr_in6);
|
||||
case AF_PACKET:
|
||||
return maxSize(sizeof(struct sockaddr_ll), offsetof(struct sockaddr_ll, sll_addr) + p_dataSize);
|
||||
default:
|
||||
return maxSize(sizeof(struct sockaddr), offsetof(struct sockaddr, sa_data) + p_dataSize);
|
||||
}
|
||||
}
|
||||
|
||||
static void makeSockaddr(sa_family_t p_family, struct sockaddr *p_dest, void *p_data, size_t p_size)
|
||||
{
|
||||
switch(p_family)
|
||||
{
|
||||
case AF_INET:
|
||||
memcpy(&((struct sockaddr_in*)p_dest)->sin_addr, p_data, p_size);
|
||||
break;
|
||||
case AF_INET6:
|
||||
memcpy(&((struct sockaddr_in6*)p_dest)->sin6_addr, p_data, p_size);
|
||||
break;
|
||||
case AF_PACKET:
|
||||
memcpy(((struct sockaddr_ll*)p_dest)->sll_addr, p_data, p_size);
|
||||
((struct sockaddr_ll*)p_dest)->sll_halen = p_size;
|
||||
break;
|
||||
default:
|
||||
memcpy(p_dest->sa_data, p_data, p_size);
|
||||
break;
|
||||
}
|
||||
p_dest->sa_family = p_family;
|
||||
}
|
||||
|
||||
static void addToEnd(struct ifaddrs **p_resultList, struct ifaddrs *p_entry)
|
||||
{
|
||||
if(!*p_resultList)
|
||||
{
|
||||
*p_resultList = p_entry;
|
||||
}
|
||||
else
|
||||
{
|
||||
struct ifaddrs *l_cur = *p_resultList;
|
||||
while(l_cur->ifa_next)
|
||||
{
|
||||
l_cur = l_cur->ifa_next;
|
||||
}
|
||||
l_cur->ifa_next = p_entry;
|
||||
}
|
||||
}
|
||||
|
||||
static void interpretLink(struct nlmsghdr *p_hdr, struct ifaddrs **p_links, struct ifaddrs **p_resultList)
|
||||
{
|
||||
struct ifinfomsg *l_info = (struct ifinfomsg *)NLMSG_DATA(p_hdr);
|
||||
|
||||
size_t l_nameSize = 0;
|
||||
size_t l_addrSize = 0;
|
||||
size_t l_dataSize = 0;
|
||||
|
||||
size_t l_rtaSize = NLMSG_PAYLOAD(p_hdr, sizeof(struct ifinfomsg));
|
||||
struct rtattr *l_rta;
|
||||
for(l_rta = (struct rtattr *)(((char *)l_info) + NLMSG_ALIGN(sizeof(struct ifinfomsg))); RTA_OK(l_rta, l_rtaSize); l_rta = RTA_NEXT(l_rta, l_rtaSize))
|
||||
{
|
||||
void *l_rtaData = RTA_DATA(l_rta);
|
||||
size_t l_rtaDataSize = RTA_PAYLOAD(l_rta);
|
||||
switch(l_rta->rta_type)
|
||||
{
|
||||
case IFLA_ADDRESS:
|
||||
case IFLA_BROADCAST:
|
||||
l_addrSize += NLMSG_ALIGN(calcAddrLen(AF_PACKET, l_rtaDataSize));
|
||||
break;
|
||||
case IFLA_IFNAME:
|
||||
l_nameSize += NLMSG_ALIGN(l_rtaSize + 1);
|
||||
break;
|
||||
case IFLA_STATS:
|
||||
l_dataSize += NLMSG_ALIGN(l_rtaSize);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
struct ifaddrs *l_entry = malloc(sizeof(struct ifaddrs) + l_nameSize + l_addrSize + l_dataSize);
|
||||
memset(l_entry, 0, sizeof(struct ifaddrs));
|
||||
l_entry->ifa_name = "";
|
||||
|
||||
char *l_name = ((char *)l_entry) + sizeof(struct ifaddrs);
|
||||
char *l_addr = l_name + l_nameSize;
|
||||
char *l_data = l_addr + l_addrSize;
|
||||
|
||||
l_entry->ifa_flags = l_info->ifi_flags;
|
||||
|
||||
l_rtaSize = NLMSG_PAYLOAD(p_hdr, sizeof(struct ifinfomsg));
|
||||
for(l_rta = (struct rtattr *)(((char *)l_info) + NLMSG_ALIGN(sizeof(struct ifinfomsg))); RTA_OK(l_rta, l_rtaSize); l_rta = RTA_NEXT(l_rta, l_rtaSize))
|
||||
{
|
||||
void *l_rtaData = RTA_DATA(l_rta);
|
||||
size_t l_rtaDataSize = RTA_PAYLOAD(l_rta);
|
||||
switch(l_rta->rta_type)
|
||||
{
|
||||
case IFLA_ADDRESS:
|
||||
case IFLA_BROADCAST:
|
||||
{
|
||||
size_t l_addrLen = calcAddrLen(AF_PACKET, l_rtaDataSize);
|
||||
makeSockaddr(AF_PACKET, (struct sockaddr *)l_addr, l_rtaData, l_rtaDataSize);
|
||||
((struct sockaddr_ll *)l_addr)->sll_ifindex = l_info->ifi_index;
|
||||
((struct sockaddr_ll *)l_addr)->sll_hatype = l_info->ifi_type;
|
||||
if(l_rta->rta_type == IFLA_ADDRESS)
|
||||
{
|
||||
l_entry->ifa_addr = (struct sockaddr *)l_addr;
|
||||
}
|
||||
else
|
||||
{
|
||||
l_entry->ifa_broadaddr = (struct sockaddr *)l_addr;
|
||||
}
|
||||
l_addr += NLMSG_ALIGN(l_addrLen);
|
||||
break;
|
||||
}
|
||||
case IFLA_IFNAME:
|
||||
strncpy(l_name, l_rtaData, l_rtaDataSize);
|
||||
l_name[l_rtaDataSize] = '\0';
|
||||
l_entry->ifa_name = l_name;
|
||||
break;
|
||||
case IFLA_STATS:
|
||||
memcpy(l_data, l_rtaData, l_rtaDataSize);
|
||||
l_entry->ifa_data = l_data;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
addToEnd(p_resultList, l_entry);
|
||||
p_links[l_info->ifi_index - 1] = l_entry;
|
||||
}
|
||||
|
||||
static void interpretAddr(struct nlmsghdr *p_hdr, struct ifaddrs **p_links, struct ifaddrs **p_resultList)
|
||||
{
|
||||
struct ifaddrmsg *l_info = (struct ifaddrmsg *)NLMSG_DATA(p_hdr);
|
||||
|
||||
size_t l_nameSize = 0;
|
||||
size_t l_addrSize = 0;
|
||||
|
||||
int l_addedNetmask = 0;
|
||||
|
||||
size_t l_rtaSize = NLMSG_PAYLOAD(p_hdr, sizeof(struct ifaddrmsg));
|
||||
struct rtattr *l_rta;
|
||||
for(l_rta = (struct rtattr *)(((char *)l_info) + NLMSG_ALIGN(sizeof(struct ifaddrmsg))); RTA_OK(l_rta, l_rtaSize); l_rta = RTA_NEXT(l_rta, l_rtaSize))
|
||||
{
|
||||
void *l_rtaData = RTA_DATA(l_rta);
|
||||
size_t l_rtaDataSize = RTA_PAYLOAD(l_rta);
|
||||
if(l_info->ifa_family == AF_PACKET)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
switch(l_rta->rta_type)
|
||||
{
|
||||
case IFA_ADDRESS:
|
||||
case IFA_LOCAL:
|
||||
if((l_info->ifa_family == AF_INET || l_info->ifa_family == AF_INET6) && !l_addedNetmask)
|
||||
{ // make room for netmask
|
||||
l_addrSize += NLMSG_ALIGN(calcAddrLen(l_info->ifa_family, l_rtaDataSize));
|
||||
l_addedNetmask = 1;
|
||||
}
|
||||
case IFA_BROADCAST:
|
||||
l_addrSize += NLMSG_ALIGN(calcAddrLen(l_info->ifa_family, l_rtaDataSize));
|
||||
break;
|
||||
case IFA_LABEL:
|
||||
l_nameSize += NLMSG_ALIGN(l_rtaSize + 1);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
struct ifaddrs *l_entry = malloc(sizeof(struct ifaddrs) + l_nameSize + l_addrSize);
|
||||
memset(l_entry, 0, sizeof(struct ifaddrs));
|
||||
l_entry->ifa_name = p_links[l_info->ifa_index - 1]->ifa_name;
|
||||
|
||||
char *l_name = ((char *)l_entry) + sizeof(struct ifaddrs);
|
||||
char *l_addr = l_name + l_nameSize;
|
||||
|
||||
l_entry->ifa_flags = l_info->ifa_flags | p_links[l_info->ifa_index - 1]->ifa_flags;
|
||||
|
||||
l_rtaSize = NLMSG_PAYLOAD(p_hdr, sizeof(struct ifaddrmsg));
|
||||
for(l_rta = (struct rtattr *)(((char *)l_info) + NLMSG_ALIGN(sizeof(struct ifaddrmsg))); RTA_OK(l_rta, l_rtaSize); l_rta = RTA_NEXT(l_rta, l_rtaSize))
|
||||
{
|
||||
void *l_rtaData = RTA_DATA(l_rta);
|
||||
size_t l_rtaDataSize = RTA_PAYLOAD(l_rta);
|
||||
switch(l_rta->rta_type)
|
||||
{
|
||||
case IFA_ADDRESS:
|
||||
case IFA_BROADCAST:
|
||||
case IFA_LOCAL:
|
||||
{
|
||||
size_t l_addrLen = calcAddrLen(l_info->ifa_family, l_rtaDataSize);
|
||||
makeSockaddr(l_info->ifa_family, (struct sockaddr *)l_addr, l_rtaData, l_rtaDataSize);
|
||||
if(l_info->ifa_family == AF_INET6)
|
||||
{
|
||||
if(IN6_IS_ADDR_LINKLOCAL((struct in6_addr *)l_rtaData) || IN6_IS_ADDR_MC_LINKLOCAL((struct in6_addr *)l_rtaData))
|
||||
{
|
||||
((struct sockaddr_in6 *)l_addr)->sin6_scope_id = l_info->ifa_index;
|
||||
}
|
||||
}
|
||||
|
||||
if(l_rta->rta_type == IFA_ADDRESS)
|
||||
{ // apparently in a point-to-point network IFA_ADDRESS contains the dest address and IFA_LOCAL contains the local address
|
||||
if(l_entry->ifa_addr)
|
||||
{
|
||||
l_entry->ifa_dstaddr = (struct sockaddr *)l_addr;
|
||||
}
|
||||
else
|
||||
{
|
||||
l_entry->ifa_addr = (struct sockaddr *)l_addr;
|
||||
}
|
||||
}
|
||||
else if(l_rta->rta_type == IFA_LOCAL)
|
||||
{
|
||||
if(l_entry->ifa_addr)
|
||||
{
|
||||
l_entry->ifa_dstaddr = l_entry->ifa_addr;
|
||||
}
|
||||
l_entry->ifa_addr = (struct sockaddr *)l_addr;
|
||||
}
|
||||
else
|
||||
{
|
||||
l_entry->ifa_broadaddr = (struct sockaddr *)l_addr;
|
||||
}
|
||||
l_addr += NLMSG_ALIGN(l_addrLen);
|
||||
break;
|
||||
}
|
||||
case IFA_LABEL:
|
||||
strncpy(l_name, l_rtaData, l_rtaDataSize);
|
||||
l_name[l_rtaDataSize] = '\0';
|
||||
l_entry->ifa_name = l_name;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if(l_entry->ifa_addr && (l_entry->ifa_addr->sa_family == AF_INET || l_entry->ifa_addr->sa_family == AF_INET6))
|
||||
{
|
||||
unsigned l_maxPrefix = (l_entry->ifa_addr->sa_family == AF_INET ? 32 : 128);
|
||||
unsigned l_prefix = (l_info->ifa_prefixlen > l_maxPrefix ? l_maxPrefix : l_info->ifa_prefixlen);
|
||||
char l_mask[16] = {0};
|
||||
unsigned i;
|
||||
for(i=0; i<(l_prefix/8); ++i)
|
||||
{
|
||||
l_mask[i] = 0xff;
|
||||
}
|
||||
l_mask[i] = 0xff << (8 - (l_prefix % 8));
|
||||
|
||||
makeSockaddr(l_entry->ifa_addr->sa_family, (struct sockaddr *)l_addr, l_mask, l_maxPrefix / 8);
|
||||
l_entry->ifa_netmask = (struct sockaddr *)l_addr;
|
||||
}
|
||||
|
||||
addToEnd(p_resultList, l_entry);
|
||||
}
|
||||
|
||||
static void interpret(int p_socket, NetlinkList *p_netlinkList, struct ifaddrs **p_links, struct ifaddrs **p_resultList)
|
||||
{
|
||||
pid_t l_pid = getpid();
|
||||
for(; p_netlinkList; p_netlinkList = p_netlinkList->m_next)
|
||||
{
|
||||
unsigned int l_nlsize = p_netlinkList->m_size;
|
||||
struct nlmsghdr *l_hdr;
|
||||
for(l_hdr = p_netlinkList->m_data; NLMSG_OK(l_hdr, l_nlsize); l_hdr = NLMSG_NEXT(l_hdr, l_nlsize))
|
||||
{
|
||||
if((pid_t)l_hdr->nlmsg_pid != l_pid || (int)l_hdr->nlmsg_seq != p_socket)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
if(l_hdr->nlmsg_type == NLMSG_DONE)
|
||||
{
|
||||
break;
|
||||
}
|
||||
|
||||
if(l_hdr->nlmsg_type == RTM_NEWLINK)
|
||||
{
|
||||
interpretLink(l_hdr, p_links, p_resultList);
|
||||
}
|
||||
else if(l_hdr->nlmsg_type == RTM_NEWADDR)
|
||||
{
|
||||
interpretAddr(l_hdr, p_links, p_resultList);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static unsigned countLinks(int p_socket, NetlinkList *p_netlinkList)
|
||||
{
|
||||
unsigned l_links = 0;
|
||||
pid_t l_pid = getpid();
|
||||
for(; p_netlinkList; p_netlinkList = p_netlinkList->m_next)
|
||||
{
|
||||
unsigned int l_nlsize = p_netlinkList->m_size;
|
||||
struct nlmsghdr *l_hdr;
|
||||
for(l_hdr = p_netlinkList->m_data; NLMSG_OK(l_hdr, l_nlsize); l_hdr = NLMSG_NEXT(l_hdr, l_nlsize))
|
||||
{
|
||||
if((pid_t)l_hdr->nlmsg_pid != l_pid || (int)l_hdr->nlmsg_seq != p_socket)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
if(l_hdr->nlmsg_type == NLMSG_DONE)
|
||||
{
|
||||
break;
|
||||
}
|
||||
|
||||
if(l_hdr->nlmsg_type == RTM_NEWLINK)
|
||||
{
|
||||
++l_links;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return l_links;
|
||||
}
|
||||
|
||||
int getifaddrs(struct ifaddrs **ifap)
|
||||
{
|
||||
if(!ifap)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
*ifap = NULL;
|
||||
|
||||
int l_socket = netlink_socket();
|
||||
if(l_socket < 0)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
NetlinkList *l_linkResults = getResultList(l_socket, RTM_GETLINK);
|
||||
if(!l_linkResults)
|
||||
{
|
||||
close(l_socket);
|
||||
return -1;
|
||||
}
|
||||
|
||||
NetlinkList *l_addrResults = getResultList(l_socket, RTM_GETADDR);
|
||||
if(!l_addrResults)
|
||||
{
|
||||
close(l_socket);
|
||||
freeResultList(l_linkResults);
|
||||
return -1;
|
||||
}
|
||||
|
||||
unsigned l_numLinks = countLinks(l_socket, l_linkResults) + countLinks(l_socket, l_addrResults);
|
||||
struct ifaddrs *l_links[l_numLinks];
|
||||
memset(l_links, 0, l_numLinks * sizeof(struct ifaddrs *));
|
||||
|
||||
interpret(l_socket, l_linkResults, l_links, ifap);
|
||||
interpret(l_socket, l_addrResults, l_links, ifap);
|
||||
|
||||
freeResultList(l_linkResults);
|
||||
freeResultList(l_addrResults);
|
||||
close(l_socket);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void freeifaddrs(struct ifaddrs *ifa)
|
||||
{
|
||||
struct ifaddrs *l_cur;
|
||||
while(ifa)
|
||||
{
|
||||
l_cur = ifa;
|
||||
ifa = ifa->ifa_next;
|
||||
free(l_cur);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,54 @@
|
|||
/*
|
||||
* Copyright (c) 1995, 1999
|
||||
* Berkeley Software Design, Inc. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY Berkeley Software Design, Inc. ``AS IS'' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL Berkeley Software Design, Inc. BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* BSDI ifaddrs.h,v 2.5 2000/02/23 14:51:59 dab Exp
|
||||
*/
|
||||
|
||||
#ifndef _IFADDRS_H_
|
||||
#define _IFADDRS_H_
|
||||
|
||||
struct ifaddrs {
|
||||
struct ifaddrs *ifa_next;
|
||||
char *ifa_name;
|
||||
unsigned int ifa_flags;
|
||||
struct sockaddr *ifa_addr;
|
||||
struct sockaddr *ifa_netmask;
|
||||
struct sockaddr *ifa_dstaddr;
|
||||
void *ifa_data;
|
||||
};
|
||||
|
||||
/*
|
||||
* This may have been defined in <net/if.h>. Note that if <net/if.h> is
|
||||
* to be included it must be included before this header file.
|
||||
*/
|
||||
#ifndef ifa_broadaddr
|
||||
#define ifa_broadaddr ifa_dstaddr /* broadcast address interface */
|
||||
#endif
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
|
||||
__BEGIN_DECLS
|
||||
extern int getifaddrs(struct ifaddrs **ifap);
|
||||
extern void freeifaddrs(struct ifaddrs *ifa);
|
||||
__END_DECLS
|
||||
|
||||
#endif
|
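The android-ifaddrs shim above reimplements the standard `getifaddrs`/`freeifaddrs` interface on top of a netlink route socket, since Android's libc historically lacked it. A minimal, hypothetical caller looks like the sketch below; the address family filter and the printed fields are illustrative, and error handling is reduced to a single check:

```cpp
#include <cstdio>
#include <arpa/inet.h>
#include <netinet/in.h>
#include "ifaddrs.h"

// Enumerate every interface returned by the netlink-backed getifaddrs()
// and print the IPv4 address of each entry that has one.
int main() {
    struct ifaddrs* list = nullptr;
    if (getifaddrs(&list) != 0) {
        std::perror("getifaddrs");
        return 1;
    }
    for (struct ifaddrs* cur = list; cur != nullptr; cur = cur->ifa_next) {
        if (cur->ifa_addr == nullptr || cur->ifa_addr->sa_family != AF_INET)
            continue;
        char buf[INET_ADDRSTRLEN];
        const auto* sin = reinterpret_cast<const sockaddr_in*>(cur->ifa_addr);
        inet_ntop(AF_INET, &sin->sin_addr, buf, sizeof(buf));
        std::printf("%s: %s\n", cur->ifa_name, buf);
    }
    freeifaddrs(list); // frees the whole linked list allocated by getifaddrs()
    return 0;
}
```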
|
@@ -1 +1 @@
-Subproject commit 6d7edc593be8e47c8de7bc5f7d6b32971fad0c24
+Subproject commit 727f616b6e5cafaba072131c077a3b8fea87b8be

@@ -1,4 +1,4 @@
-From https://github.com/yhirose/cpp-httplib/commit/d9479bc0b12e8a1e8bce2d34da4feeef488581f3
+From https://github.com/yhirose/cpp-httplib/commit/b251668522dd459d2c6a75c10390a11b640be708
 
 MIT License
 

@@ -13,3 +13,4 @@ It's extremely easy to setup. Just include httplib.h file in your code!
 Inspired by Sinatra and express.
 
 © 2017 Yuji Hirose
+
File diff suppressed because it is too large
@@ -62,6 +62,13 @@ elseif(ENABLE_FFMPEG_AUDIO_DECODER)
         target_include_directories(audio_core PRIVATE ${FFMPEG_DIR}/include)
     endif()
     target_compile_definitions(audio_core PUBLIC HAVE_FFMPEG)
+elseif(ENABLE_FDK)
+    target_sources(audio_core PRIVATE
+        hle/fdk_decoder.cpp
+        hle/fdk_decoder.h
+    )
+    target_link_libraries(audio_core PRIVATE ${FDK_AAC})
+    target_compile_definitions(audio_core PUBLIC HAVE_FDK)
 endif()
 
 if(SDL2_FOUND)
@ -0,0 +1,233 @@
|
|||
// Copyright 2019 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <fdk-aac/aacdecoder_lib.h>
|
||||
#include "audio_core/hle/fdk_decoder.h"
|
||||
|
||||
namespace AudioCore::HLE {
|
||||
|
||||
class FDKDecoder::Impl {
|
||||
public:
|
||||
explicit Impl(Memory::MemorySystem& memory);
|
||||
~Impl();
|
||||
std::optional<BinaryResponse> ProcessRequest(const BinaryRequest& request);
|
||||
bool IsValid() const {
|
||||
return decoder != nullptr;
|
||||
}
|
||||
|
||||
private:
|
||||
std::optional<BinaryResponse> Initalize(const BinaryRequest& request);
|
||||
|
||||
std::optional<BinaryResponse> Decode(const BinaryRequest& request);
|
||||
|
||||
void Clear();
|
||||
|
||||
Memory::MemorySystem& memory;
|
||||
|
||||
HANDLE_AACDECODER decoder = nullptr;
|
||||
};
|
||||
|
||||
FDKDecoder::Impl::Impl(Memory::MemorySystem& memory) : memory(memory) {
|
||||
// allocate an array of LIB_INFO structures
|
||||
// if we don't pre-fill the whole array with zeros, `aacDecoder_GetLibInfo`
// will segfault: some code in fdk_aac depends on the initial
// values in this array
|
||||
LIB_INFO decoder_info[FDK_MODULE_LAST] = {};
|
||||
// get library information and fill the struct
|
||||
if (aacDecoder_GetLibInfo(decoder_info) != 0) {
|
||||
LOG_ERROR(Audio_DSP, "Failed to retrieve fdk_aac library information!");
|
||||
return;
|
||||
}
|
||||
// This segment: identify the broken fdk_aac implementation
|
||||
// and refuse to initialize if identified as broken (check for module IDs)
|
||||
// although our AAC samples do not use the SBR feature, querying for it is a way to detect
// watered-down fdk_aac implementations
|
||||
if (FDKlibInfo_getCapabilities(decoder_info, FDK_SBRDEC) == 0) {
|
||||
LOG_ERROR(Audio_DSP, "Bad fdk_aac library found! Initialization aborted!");
|
||||
return;
|
||||
}
|
||||
|
||||
LOG_INFO(Audio_DSP, "Using fdk_aac version {} (build date: {})", decoder_info[0].versionStr,
|
||||
decoder_info[0].build_date);
|
||||
|
||||
// choose the input format when initializing: 1 layer of ADTS
|
||||
decoder = aacDecoder_Open(TRANSPORT_TYPE::TT_MP4_ADTS, 1);
|
||||
// set maximum output channel to two (stereo)
|
||||
// if the input samples have more channels, fdk_aac will perform a downmix
|
||||
AAC_DECODER_ERROR ret = aacDecoder_SetParam(decoder, AAC_PCM_MAX_OUTPUT_CHANNELS, 2);
|
||||
if (ret != AAC_DEC_OK) {
|
||||
// failing to set this parameter suggests the decoder implementation is broken,
// so we shut everything down
|
||||
aacDecoder_Close(decoder);
|
||||
decoder = nullptr;
|
||||
LOG_ERROR(Audio_DSP, "Unable to set downmix parameter: {}", ret);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
std::optional<BinaryResponse> FDKDecoder::Impl::Initalize(const BinaryRequest& request) {
|
||||
BinaryResponse response;
|
||||
std::memcpy(&response, &request, sizeof(response));
|
||||
response.unknown1 = 0x0;
|
||||
|
||||
if (decoder) {
|
||||
LOG_INFO(Audio_DSP, "FDK Decoder initialized");
|
||||
Clear();
|
||||
} else {
|
||||
LOG_ERROR(Audio_DSP, "Decoder not initialized");
|
||||
}
|
||||
|
||||
return response;
|
||||
}
|
||||
|
||||
FDKDecoder::Impl::~Impl() {
|
||||
if (decoder)
|
||||
aacDecoder_Close(decoder);
|
||||
}
|
||||
|
||||
void FDKDecoder::Impl::Clear() {
|
||||
s16 decoder_output[8192];
|
||||
// flush and re-sync the decoder, discarding the internal buffer
|
||||
// we actually don't care if this succeeds or not
|
||||
// FLUSH - flush internal buffer
|
||||
// INTR - treat the current internal buffer as discontinuous
|
||||
// CONCEAL - try to interpolate and smooth out the samples
|
||||
if (decoder)
|
||||
aacDecoder_DecodeFrame(decoder, decoder_output, 8192,
|
||||
AACDEC_FLUSH & AACDEC_INTR & AACDEC_CONCEAL);
|
||||
}
|
||||
|
||||
std::optional<BinaryResponse> FDKDecoder::Impl::ProcessRequest(const BinaryRequest& request) {
|
||||
if (request.codec != DecoderCodec::AAC) {
|
||||
LOG_ERROR(Audio_DSP, "FDK AAC Decoder cannot handle such codec: {}",
|
||||
static_cast<u16>(request.codec));
|
||||
return {};
|
||||
}
|
||||
|
||||
switch (request.cmd) {
|
||||
case DecoderCommand::Init: {
|
||||
return Initalize(request);
|
||||
}
|
||||
case DecoderCommand::Decode: {
|
||||
return Decode(request);
|
||||
}
|
||||
case DecoderCommand::Unknown: {
|
||||
BinaryResponse response;
|
||||
std::memcpy(&response, &request, sizeof(response));
|
||||
response.unknown1 = 0x0;
|
||||
return response;
|
||||
}
|
||||
default:
|
||||
LOG_ERROR(Audio_DSP, "Got unknown binary request: {}", static_cast<u16>(request.cmd));
|
||||
return {};
|
||||
}
|
||||
}
|
||||
|
||||
std::optional<BinaryResponse> FDKDecoder::Impl::Decode(const BinaryRequest& request) {
|
||||
BinaryResponse response;
|
||||
response.codec = request.codec;
|
||||
response.cmd = request.cmd;
|
||||
response.size = request.size;
|
||||
|
||||
if (!decoder) {
|
||||
LOG_DEBUG(Audio_DSP, "Decoder not initalized");
|
||||
// This is a hack to continue games that are not compiled with the aac codec
|
||||
response.num_channels = 2;
|
||||
response.num_samples = 1024;
|
||||
return response;
|
||||
}
|
||||
|
||||
if (request.src_addr < Memory::FCRAM_PADDR ||
|
||||
request.src_addr + request.size > Memory::FCRAM_PADDR + Memory::FCRAM_SIZE) {
|
||||
LOG_ERROR(Audio_DSP, "Got out of bounds src_addr {:08x}", request.src_addr);
|
||||
return {};
|
||||
}
|
||||
u8* data = memory.GetFCRAMPointer(request.src_addr - Memory::FCRAM_PADDR);
|
||||
|
||||
std::array<std::vector<s16>, 2> out_streams;
|
||||
|
||||
std::size_t data_size = request.size;
|
||||
|
||||
// decoding loops
|
||||
AAC_DECODER_ERROR result = AAC_DEC_OK;
|
||||
// 8192 units of s16 are enough to hold one frame of AAC-LC or AAC-HE/v2 data
|
||||
s16 decoder_output[8192];
|
||||
// note that we don't free this pointer as it is automatically freed by fdk_aac
|
||||
CStreamInfo* stream_info;
|
||||
// how many bytes to be queued into the decoder, decrementing from the buffer size
|
||||
u32 buffer_remaining = data_size;
|
||||
// alias the data_size as an u32
|
||||
u32 input_size = data_size;
|
||||
|
||||
while (buffer_remaining) {
|
||||
// queue the input buffer, fdk_aac will automatically slice out the buffer it needs
|
||||
// from the input buffer
|
||||
result = aacDecoder_Fill(decoder, &data, &input_size, &buffer_remaining);
|
||||
if (result != AAC_DEC_OK) {
|
||||
// there are some issues when queuing the input buffer
|
||||
LOG_ERROR(Audio_DSP, "Failed to enqueue the input samples");
|
||||
return std::nullopt;
|
||||
}
|
||||
// get output from decoder
|
||||
result = aacDecoder_DecodeFrame(decoder, decoder_output, 8192, 0);
|
||||
if (result == AAC_DEC_OK) {
|
||||
// get the stream information
|
||||
stream_info = aacDecoder_GetStreamInfo(decoder);
|
||||
// fill the stream information for binary response
|
||||
response.num_channels = stream_info->aacNumChannels;
|
||||
response.num_samples = stream_info->frameSize;
|
||||
// fill the output
|
||||
// the sample size = frame_size * channel_counts
|
||||
for (int sample = 0; sample < (stream_info->frameSize * 2); sample++) {
|
||||
for (int ch = 0; ch < stream_info->aacNumChannels; ch++) {
|
||||
out_streams[ch].push_back(decoder_output[(sample * 2) + 1]);
|
||||
}
|
||||
}
|
||||
} else if (result == AAC_DEC_TRANSPORT_SYNC_ERROR) {
|
||||
// decoder has some synchronization problems, try again with new samples,
|
||||
// using old samples might trigger this error again
|
||||
continue;
|
||||
} else {
|
||||
LOG_ERROR(Audio_DSP, "Error decoding the sample: {}", result);
|
||||
return std::nullopt;
|
||||
}
|
||||
}
|
||||
// transfer the decoded buffer from vector to the FCRAM
|
||||
if (out_streams[0].size() != 0) {
|
||||
if (request.dst_addr_ch0 < Memory::FCRAM_PADDR ||
|
||||
request.dst_addr_ch0 + out_streams[0].size() >
|
||||
Memory::FCRAM_PADDR + Memory::FCRAM_SIZE) {
|
||||
LOG_ERROR(Audio_DSP, "Got out of bounds dst_addr_ch0 {:08x}", request.dst_addr_ch0);
|
||||
return {};
|
||||
}
|
||||
std::memcpy(memory.GetFCRAMPointer(request.dst_addr_ch0 - Memory::FCRAM_PADDR),
|
||||
out_streams[0].data(), out_streams[0].size());
|
||||
}
|
||||
|
||||
if (out_streams[1].size() != 0) {
|
||||
if (request.dst_addr_ch1 < Memory::FCRAM_PADDR ||
|
||||
request.dst_addr_ch1 + out_streams[1].size() >
|
||||
Memory::FCRAM_PADDR + Memory::FCRAM_SIZE) {
|
||||
LOG_ERROR(Audio_DSP, "Got out of bounds dst_addr_ch1 {:08x}", request.dst_addr_ch1);
|
||||
return {};
|
||||
}
|
||||
std::memcpy(memory.GetFCRAMPointer(request.dst_addr_ch1 - Memory::FCRAM_PADDR),
|
||||
out_streams[1].data(), out_streams[1].size());
|
||||
}
|
||||
return response;
|
||||
}
|
||||
|
||||
FDKDecoder::FDKDecoder(Memory::MemorySystem& memory) : impl(std::make_unique<Impl>(memory)) {}
|
||||
|
||||
FDKDecoder::~FDKDecoder() = default;
|
||||
|
||||
std::optional<BinaryResponse> FDKDecoder::ProcessRequest(const BinaryRequest& request) {
|
||||
return impl->ProcessRequest(request);
|
||||
}
|
||||
|
||||
bool FDKDecoder::IsValid() const {
|
||||
return impl->IsValid();
|
||||
}
|
||||
|
||||
} // namespace AudioCore::HLE
|
|
@@ -0,0 +1,23 @@
+// Copyright 2019 Citra Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "audio_core/hle/decoder.h"
+
+namespace AudioCore::HLE {
+
+class FDKDecoder final : public DecoderBase {
+public:
+    explicit FDKDecoder(Memory::MemorySystem& memory);
+    ~FDKDecoder() override;
+    std::optional<BinaryResponse> ProcessRequest(const BinaryRequest& request) override;
+    bool IsValid() const override;
+
+private:
+    class Impl;
+    std::unique_ptr<Impl> impl;
+};
+
+} // namespace AudioCore::HLE
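For context, a rough sketch of how a decode request might be driven through this interface. The field names come from the `BinaryRequest`/`BinaryResponse` structs used in the implementation above; the addresses and sizes are made-up example values, and constructing a decoder per call is only for the sake of the example:

```cpp
// Hypothetical caller: feed one ADTS frame located in FCRAM to the decoder.
std::optional<AudioCore::HLE::BinaryResponse> DecodeOneFrame(Memory::MemorySystem& memory) {
    AudioCore::HLE::FDKDecoder decoder(memory);
    if (!decoder.IsValid())
        return std::nullopt; // fdk_aac missing or refused to initialize

    AudioCore::HLE::BinaryRequest request{};
    request.codec = AudioCore::HLE::DecoderCodec::AAC;
    request.cmd = AudioCore::HLE::DecoderCommand::Decode;
    request.src_addr = Memory::FCRAM_PADDR + 0x100000;    // where the ADTS data lives (example)
    request.size = 0x600;                                  // bytes of input (example)
    request.dst_addr_ch0 = Memory::FCRAM_PADDR + 0x200000; // left-channel PCM output (example)
    request.dst_addr_ch1 = Memory::FCRAM_PADDR + 0x280000; // right-channel PCM output (example)
    return decoder.ProcessRequest(request);
}
```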
@@ -13,6 +13,8 @@
 #include "audio_core/hle/wmf_decoder.h"
 #elif HAVE_FFMPEG
 #include "audio_core/hle/ffmpeg_decoder.h"
+#elif HAVE_FDK
+#include "audio_core/hle/fdk_decoder.h"
 #endif
 #include "audio_core/hle/common.h"
 #include "audio_core/hle/decoder.h"

@@ -124,6 +126,8 @@ DspHle::Impl::Impl(DspHle& parent_, Memory::MemorySystem& memory) : parent(paren
     decoder = std::make_unique<HLE::WMFDecoder>(memory);
 #elif defined(HAVE_FFMPEG)
     decoder = std::make_unique<HLE::FFMPEGDecoder>(memory);
+#elif defined(HAVE_FDK)
+    decoder = std::make_unique<HLE::FDKDecoder>(memory);
 #else
     LOG_WARNING(Audio_DSP, "No decoder found, this could lead to missing audio");
     decoder = std::make_unique<HLE::NullDecoder>();
@@ -25,10 +25,6 @@ ConfigureGeneral::ConfigureGeneral(QWidget* parent)
 ConfigureGeneral::~ConfigureGeneral() = default;
 
 void ConfigureGeneral::SetConfiguration() {
-    ui->toggle_frame_limit->setChecked(Settings::values.use_frame_limit);
-    ui->frame_limit->setEnabled(ui->toggle_frame_limit->isChecked());
-    ui->frame_limit->setValue(Settings::values.frame_limit);
-
     ui->toggle_check_exit->setChecked(UISettings::values.confirm_before_closing);
     ui->toggle_background_pause->setChecked(UISettings::values.pause_when_in_background);
 

@@ -57,9 +53,6 @@ void ConfigureGeneral::ResetDefaults() {
 }
 
 void ConfigureGeneral::ApplyConfiguration() {
-    Settings::values.use_frame_limit = ui->toggle_frame_limit->isChecked();
-    Settings::values.frame_limit = ui->frame_limit->value();
-
     UISettings::values.confirm_before_closing = ui->toggle_check_exit->isChecked();
     UISettings::values.pause_when_in_background = ui->toggle_background_pause->isChecked();
 
@@ -114,7 +114,7 @@ void IPCRecorderWidget::SetEnabled(bool enabled) {
 }
 
 void IPCRecorderWidget::Clear() {
-    id_offset = records.size() + 1;
+    id_offset += records.size();
 
     records.clear();
     ui->main->invisibleRootItem()->takeChildren();
@@ -61,13 +61,14 @@ void RegistersWidget::OnDebugModeEntered() {
     if (!Core::System::GetInstance().IsPoweredOn())
         return;
 
+    // Todo: Handle all cores
     for (int i = 0; i < core_registers->childCount(); ++i)
         core_registers->child(i)->setText(
-            1, QStringLiteral("0x%1").arg(Core::CPU().GetReg(i), 8, 16, QLatin1Char('0')));
+            1, QStringLiteral("0x%1").arg(Core::GetCore(0).GetReg(i), 8, 16, QLatin1Char('0')));
 
     for (int i = 0; i < vfp_registers->childCount(); ++i)
         vfp_registers->child(i)->setText(
-            1, QStringLiteral("0x%1").arg(Core::CPU().GetVFPReg(i), 8, 16, QLatin1Char('0')));
+            1, QStringLiteral("0x%1").arg(Core::GetCore(0).GetVFPReg(i), 8, 16, QLatin1Char('0')));
 
     UpdateCPSRValues();
     UpdateVFPSystemRegisterValues();

@@ -127,7 +128,8 @@ void RegistersWidget::CreateCPSRChildren() {
 }
 
 void RegistersWidget::UpdateCPSRValues() {
-    const u32 cpsr_val = Core::CPU().GetCPSR();
+    // Todo: Handle all cores
+    const u32 cpsr_val = Core::GetCore(0).GetCPSR();
 
     cpsr->setText(1, QStringLiteral("0x%1").arg(cpsr_val, 8, 16, QLatin1Char('0')));
     cpsr->child(0)->setText(

@@ -191,10 +193,11 @@ void RegistersWidget::CreateVFPSystemRegisterChildren() {
 }
 
 void RegistersWidget::UpdateVFPSystemRegisterValues() {
-    const u32 fpscr_val = Core::CPU().GetVFPSystemReg(VFP_FPSCR);
-    const u32 fpexc_val = Core::CPU().GetVFPSystemReg(VFP_FPEXC);
-    const u32 fpinst_val = Core::CPU().GetVFPSystemReg(VFP_FPINST);
-    const u32 fpinst2_val = Core::CPU().GetVFPSystemReg(VFP_FPINST2);
+    // Todo: handle all cores
+    const u32 fpscr_val = Core::GetCore(0).GetVFPSystemReg(VFP_FPSCR);
+    const u32 fpexc_val = Core::GetCore(0).GetVFPSystemReg(VFP_FPEXC);
+    const u32 fpinst_val = Core::GetCore(0).GetVFPSystemReg(VFP_FPINST);
+    const u32 fpinst2_val = Core::GetCore(0).GetVFPSystemReg(VFP_FPINST2);
 
     QTreeWidgetItem* const fpscr = vfp_system_registers->child(0);
     fpscr->setText(1, QStringLiteral("0x%1").arg(fpscr_val, 8, 16, QLatin1Char('0')));
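The `GetCore(0)` calls above only cover the first core, as the Todo notes. A hedged sketch of what an all-cores refresh might look like, assuming the `Core::GetNumCores()` helper used in the wait-tree widget below and one value column per core in the tree (the widget layout is an assumption, not something in this diff):

```cpp
// Hypothetical all-cores refresh. Core::GetNumCores() and Core::GetCore(i)
// are the accessors that appear elsewhere in this diff; the "column per core"
// layout is assumed for illustration.
void UpdateAllCoreRegisters(QTreeWidgetItem* core_registers) {
    const u32 num_cores = Core::GetNumCores();
    for (u32 core = 0; core < num_cores; ++core) {
        auto& cpu = Core::GetCore(core);
        for (int i = 0; i < core_registers->childCount(); ++i) {
            core_registers->child(i)->setText(
                static_cast<int>(1 + core),
                QStringLiteral("0x%1").arg(cpu.GetReg(i), 8, 16, QLatin1Char('0')));
        }
    }
}
```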
@@ -12,6 +12,7 @@
 #include "core/hle/kernel/thread.h"
 #include "core/hle/kernel/timer.h"
 #include "core/hle/kernel/wait_object.h"
+#include "core/settings.h"
 
 WaitTreeItem::~WaitTreeItem() = default;
 

@@ -51,13 +52,17 @@ std::size_t WaitTreeItem::Row() const {
 }
 
 std::vector<std::unique_ptr<WaitTreeThread>> WaitTreeItem::MakeThreadItemList() {
-    const auto& threads = Core::System::GetInstance().Kernel().GetThreadManager().GetThreadList();
+    u32 num_cores = Core::GetNumCores();
     std::vector<std::unique_ptr<WaitTreeThread>> item_list;
-    item_list.reserve(threads.size());
+    for (u32 i = 0; i < num_cores; ++i) {
+        const auto& threads =
+            Core::System::GetInstance().Kernel().GetThreadManager(i).GetThreadList();
+        item_list.reserve(item_list.size() + threads.size());
     for (std::size_t i = 0; i < threads.size(); ++i) {
         item_list.push_back(std::make_unique<WaitTreeThread>(*threads[i]));
         item_list.back()->row = i;
     }
+    }
     return item_list;
 }
 
@ -468,6 +468,8 @@ void GameList::AddGamePopup(QMenu& context_menu, const QString& path, u64 progra
|
|||
QAction* open_texture_dump_location = context_menu.addAction(tr("Open Texture Dump Location"));
|
||||
QAction* open_texture_load_location =
|
||||
context_menu.addAction(tr("Open Custom Texture Location"));
|
||||
QAction* open_mods_location = context_menu.addAction(tr("Open Mods Location"));
|
||||
QAction* dump_romfs = context_menu.addAction(tr("Dump RomFS"));
|
||||
QAction* navigate_to_gamedb_entry = context_menu.addAction(tr("Navigate to GameDB entry"));
|
||||
|
||||
const bool is_application =
|
||||
|
@ -497,6 +499,8 @@ void GameList::AddGamePopup(QMenu& context_menu, const QString& path, u64 progra
|
|||
|
||||
open_texture_dump_location->setVisible(is_application);
|
||||
open_texture_load_location->setVisible(is_application);
|
||||
open_mods_location->setVisible(is_application);
|
||||
dump_romfs->setVisible(is_application);
|
||||
|
||||
navigate_to_gamedb_entry->setVisible(it != compatibility_list.end());
|
||||
|
||||
|
@ -526,6 +530,15 @@ void GameList::AddGamePopup(QMenu& context_menu, const QString& path, u64 progra
|
|||
emit OpenFolderRequested(program_id, GameListOpenTarget::TEXTURE_LOAD);
|
||||
}
|
||||
});
|
||||
connect(open_mods_location, &QAction::triggered, [this, program_id] {
|
||||
if (FileUtil::CreateFullPath(fmt::format("{}mods/{:016X}/",
|
||||
FileUtil::GetUserPath(FileUtil::UserPath::LoadDir),
|
||||
program_id))) {
|
||||
emit OpenFolderRequested(program_id, GameListOpenTarget::MODS);
|
||||
}
|
||||
});
|
||||
connect(dump_romfs, &QAction::triggered,
|
||||
[this, path, program_id] { emit DumpRomFSRequested(path, program_id); });
|
||||
connect(navigate_to_gamedb_entry, &QAction::triggered, [this, program_id]() {
|
||||
emit NavigateToGamedbEntryRequested(program_id, compatibility_list);
|
||||
});
|
||||
|
|
|
@@ -35,7 +35,8 @@ enum class GameListOpenTarget {
     APPLICATION = 2,
     UPDATE_DATA = 3,
     TEXTURE_DUMP = 4,
-    TEXTURE_LOAD = 5
+    TEXTURE_LOAD = 5,
+    MODS = 6,
 };
 
 class GameList : public QWidget {

@@ -81,6 +82,7 @@ signals:
     void OpenFolderRequested(u64 program_id, GameListOpenTarget target);
     void NavigateToGamedbEntryRequested(u64 program_id,
                                         const CompatibilityList& compatibility_list);
+    void DumpRomFSRequested(QString game_path, u64 program_id);
     void OpenDirectory(const QString& directory);
     void AddDirectory();
     void ShowList(bool show);
|
@ -597,6 +597,7 @@ void GMainWindow::ConnectWidgetEvents() {
|
|||
connect(game_list, &GameList::OpenFolderRequested, this, &GMainWindow::OnGameListOpenFolder);
|
||||
connect(game_list, &GameList::NavigateToGamedbEntryRequested, this,
|
||||
&GMainWindow::OnGameListNavigateToGamedbEntry);
|
||||
connect(game_list, &GameList::DumpRomFSRequested, this, &GMainWindow::OnGameListDumpRomFS);
|
||||
connect(game_list, &GameList::AddDirectory, this, &GMainWindow::OnGameListAddDirectory);
|
||||
connect(game_list_placeholder, &GameListPlaceholder::AddDirectory, this,
|
||||
&GMainWindow::OnGameListAddDirectory);
|
||||
|
@ -1231,6 +1232,11 @@ void GMainWindow::OnGameListOpenFolder(u64 data_id, GameListOpenTarget target) {
|
|||
path = fmt::format("{}textures/{:016X}/",
|
||||
FileUtil::GetUserPath(FileUtil::UserPath::LoadDir), data_id);
|
||||
break;
|
||||
case GameListOpenTarget::MODS:
|
||||
open_target = "Mods";
|
||||
path = fmt::format("{}mods/{:016X}/", FileUtil::GetUserPath(FileUtil::UserPath::LoadDir),
|
||||
data_id);
|
||||
break;
|
||||
default:
|
||||
LOG_ERROR(Frontend, "Unexpected target {}", static_cast<int>(target));
|
||||
return;
|
||||
|
@ -1262,6 +1268,46 @@ void GMainWindow::OnGameListNavigateToGamedbEntry(u64 program_id,
|
|||
QDesktopServices::openUrl(QUrl(QStringLiteral("https://citra-emu.org/game/") + directory));
|
||||
}
|
||||
|
||||
void GMainWindow::OnGameListDumpRomFS(QString game_path, u64 program_id) {
|
||||
auto* dialog = new QProgressDialog(tr("Dumping..."), tr("Cancel"), 0, 0, this);
|
||||
dialog->setWindowModality(Qt::WindowModal);
|
||||
dialog->setWindowFlags(dialog->windowFlags() &
|
||||
~(Qt::WindowCloseButtonHint | Qt::WindowContextHelpButtonHint));
|
||||
dialog->setCancelButton(nullptr);
|
||||
dialog->setMinimumDuration(0);
|
||||
dialog->setValue(0);
|
||||
|
||||
const auto base_path = fmt::format(
|
||||
"{}romfs/{:016X}", FileUtil::GetUserPath(FileUtil::UserPath::DumpDir), program_id);
|
||||
const auto update_path =
|
||||
fmt::format("{}romfs/{:016X}", FileUtil::GetUserPath(FileUtil::UserPath::DumpDir),
|
||||
program_id | 0x0004000e00000000);
|
||||
using FutureWatcher = QFutureWatcher<std::pair<Loader::ResultStatus, Loader::ResultStatus>>;
|
||||
auto* future_watcher = new FutureWatcher(this);
|
||||
connect(future_watcher, &FutureWatcher::finished,
|
||||
[this, program_id, dialog, base_path, update_path, future_watcher] {
|
||||
dialog->hide();
|
||||
const auto& [base, update] = future_watcher->result();
|
||||
if (base != Loader::ResultStatus::Success) {
|
||||
QMessageBox::critical(
|
||||
this, tr("Citra"),
|
||||
tr("Could not dump base RomFS.\nRefer to the log for details."));
|
||||
return;
|
||||
}
|
||||
QDesktopServices::openUrl(QUrl::fromLocalFile(QString::fromStdString(base_path)));
|
||||
if (update == Loader::ResultStatus::Success) {
|
||||
QDesktopServices::openUrl(
|
||||
QUrl::fromLocalFile(QString::fromStdString(update_path)));
|
||||
}
|
||||
});
|
||||
|
||||
auto future = QtConcurrent::run([game_path, base_path, update_path] {
|
||||
std::unique_ptr<Loader::AppLoader> loader = Loader::GetLoader(game_path.toStdString());
|
||||
return std::make_pair(loader->DumpRomFS(base_path), loader->DumpUpdateRomFS(update_path));
|
||||
});
|
||||
future_watcher->setFuture(future);
|
||||
}
|
||||
|
||||
void GMainWindow::OnGameListOpenDirectory(const QString& directory) {
|
||||
QString path;
|
||||
if (directory == QStringLiteral("INSTALLED")) {
|
||||
|
|
|
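The RomFS dump above runs the blocking loader work through `QtConcurrent::run` and reacts on the UI thread via a `QFutureWatcher`, so the progress dialog stays responsive. The same pattern in isolation, reduced to a hedged minimal sketch (the work and result here are placeholders, not Citra code):

```cpp
#include <QFutureWatcher>
#include <QtConcurrent/QtConcurrentRun>
#include <QtGlobal>

// Minimal QtConcurrent + QFutureWatcher pattern: run work() off the GUI
// thread, then handle its result back on the GUI thread when it finishes.
void RunInBackground(QObject* parent) {
    auto* watcher = new QFutureWatcher<int>(parent);
    QObject::connect(watcher, &QFutureWatcher<int>::finished, [watcher] {
        const int result = watcher->result(); // safe: the future has finished
        qDebug("background work returned %d", result);
        watcher->deleteLater();
    });
    watcher->setFuture(QtConcurrent::run([] {
        // ... long-running, non-GUI work goes here ...
        return 42;
    }));
}
```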
@@ -176,6 +176,7 @@ private slots:
     void OnGameListOpenFolder(u64 program_id, GameListOpenTarget target);
     void OnGameListNavigateToGamedbEntry(u64 program_id,
                                          const CompatibilityList& compatibility_list);
+    void OnGameListDumpRomFS(QString game_path, u64 program_id);
     void OnGameListOpenDirectory(const QString& directory);
     void OnGameListAddDirectory();
     void OnGameListShowList(bool show);
@ -72,6 +72,8 @@ add_library(core STATIC
|
|||
file_sys/delay_generator.h
|
||||
file_sys/ivfc_archive.cpp
|
||||
file_sys/ivfc_archive.h
|
||||
file_sys/layered_fs.cpp
|
||||
file_sys/layered_fs.h
|
||||
file_sys/ncch_container.cpp
|
||||
file_sys/ncch_container.h
|
||||
file_sys/patch.cpp
|
||||
|
@ -469,9 +471,17 @@ create_target_directory_groups(core)
|
|||
|
||||
target_link_libraries(core PUBLIC common PRIVATE audio_core network video_core)
|
||||
target_link_libraries(core PUBLIC Boost::boost PRIVATE cryptopp fmt open_source_archives Boost::serialization)
|
||||
|
||||
if (ENABLE_WEB_SERVICE)
|
||||
target_compile_definitions(core PRIVATE -DENABLE_WEB_SERVICE)
|
||||
target_link_libraries(core PRIVATE web_service)
|
||||
get_directory_property(OPENSSL_LIBS
|
||||
DIRECTORY ${PROJECT_SOURCE_DIR}/externals/libressl
|
||||
DEFINITION OPENSSL_LIBS)
|
||||
|
||||
target_compile_definitions(core PRIVATE -DENABLE_WEB_SERVICE -DCPPHTTPLIB_OPENSSL_SUPPORT)
|
||||
target_link_libraries(core PRIVATE web_service ${OPENSSL_LIBS} httplib lurlparser)
|
||||
if (ANDROID)
|
||||
target_link_libraries(core PRIVATE ifaddrs)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if (ARCHITECTURE_x86_64)
|
||||
|
|
|
@@ -10,6 +10,7 @@
 #include "common/common_types.h"
 #include "core/arm/skyeye_common/arm_regformat.h"
 #include "core/arm/skyeye_common/vfp/asm_vfp.h"
+#include "core/core_timing.h"
 
 namespace Memory {
 struct PageTable;

@@ -18,6 +19,8 @@ struct PageTable;
 /// Generic ARM11 CPU interface
 class ARM_Interface : NonCopyable {
 public:
+    explicit ARM_Interface(u32 id, std::shared_ptr<Core::Timing::Timer> timer)
+        : timer(timer), id(id){};
     virtual ~ARM_Interface() {}
 
     class ThreadContext {

@@ -222,11 +225,26 @@ public:
 
     virtual void PurgeState() = 0;
 
+    std::shared_ptr<Core::Timing::Timer> GetTimer() {
+        return timer;
+    }
+
+    u32 GetID() const {
+        return id;
+    }
+
+protected:
+    std::shared_ptr<Core::Timing::Timer> timer;
+
 private:
+    u32 id;
+
     friend class boost::serialization::access;
 
     template <class Archive>
     void save(Archive& ar, const unsigned int file_version) const {
+        ar << timer;
+        ar << id;
         auto page_table = GetPageTable();
         ar << page_table;
         for (auto i = 0; i < 15; i++) {

@@ -254,6 +272,8 @@ private:
     template <class Archive>
     void load(Archive& ar, const unsigned int file_version) {
         PurgeState();
+        ar >> timer;
+        ar >> id;
         std::shared_ptr<Memory::PageTable> page_table = nullptr;
         ar >> page_table;
         SetPageTable(page_table);
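The save/load pair above is boost.serialization's split-member idiom: the timer, core id and page table are written in `save` and restored in the same order in `load`, after `PurgeState()` has cleared any derived JIT state. As a hedged, self-contained illustration of that idiom (the class and members here are invented for the example, not Citra types):

```cpp
#include <boost/archive/text_iarchive.hpp>
#include <boost/archive/text_oarchive.hpp>
#include <boost/serialization/split_member.hpp>

// Toy class showing the same split save/load pattern used by ARM_Interface
// above. "ticks" stands in for the timer/id/page-table state.
class ToyCore {
public:
    explicit ToyCore(unsigned id) : id(id) {}

private:
    friend class boost::serialization::access;

    template <class Archive>
    void save(Archive& ar, const unsigned int /*version*/) const {
        ar << id;
        ar << ticks;
    }

    template <class Archive>
    void load(Archive& ar, const unsigned int /*version*/) {
        // Reset cached/derived state first, then read back in save order.
        ar >> id;
        ar >> ticks;
    }

    BOOST_SERIALIZATION_SPLIT_MEMBER()

    unsigned id;
    unsigned long long ticks = 0;
};
```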
@ -72,8 +72,7 @@ private:
|
|||
class DynarmicUserCallbacks final : public Dynarmic::A32::UserCallbacks {
|
||||
public:
|
||||
explicit DynarmicUserCallbacks(ARM_Dynarmic& parent)
|
||||
: parent(parent), timing(parent.system.CoreTiming()), svc_context(parent.system),
|
||||
memory(parent.memory) {}
|
||||
: parent(parent), svc_context(parent.system), memory(parent.memory) {}
|
||||
~DynarmicUserCallbacks() = default;
|
||||
|
||||
std::uint8_t MemoryRead8(VAddr vaddr) override {
|
||||
|
@ -137,7 +136,7 @@ public:
|
|||
parent.jit->HaltExecution();
|
||||
parent.SetPC(pc);
|
||||
Kernel::Thread* thread =
|
||||
parent.system.Kernel().GetThreadManager().GetCurrentThread();
|
||||
parent.system.Kernel().GetCurrentThreadManager().GetCurrentThread();
|
||||
parent.SaveContext(thread->context);
|
||||
GDBStub::Break();
|
||||
GDBStub::SendTrap(thread, 5);
|
||||
|
@ -150,22 +149,23 @@ public:
|
|||
}
|
||||
|
||||
void AddTicks(std::uint64_t ticks) override {
|
||||
timing.AddTicks(ticks);
|
||||
parent.GetTimer()->AddTicks(ticks);
|
||||
}
|
||||
std::uint64_t GetTicksRemaining() override {
|
||||
s64 ticks = timing.GetDowncount();
|
||||
s64 ticks = parent.GetTimer()->GetDowncount();
|
||||
return static_cast<u64>(ticks <= 0 ? 0 : ticks);
|
||||
}
|
||||
|
||||
ARM_Dynarmic& parent;
|
||||
Core::Timing& timing;
|
||||
Kernel::SVCContext svc_context;
|
||||
Memory::MemorySystem& memory;
|
||||
};
|
||||
|
||||
ARM_Dynarmic::ARM_Dynarmic(Core::System* system, Memory::MemorySystem& memory,
|
||||
PrivilegeMode initial_mode)
|
||||
: system(*system), memory(memory), cb(std::make_unique<DynarmicUserCallbacks>(*this)) {
|
||||
PrivilegeMode initial_mode, u32 id,
|
||||
std::shared_ptr<Core::Timing::Timer> timer)
|
||||
: ARM_Interface(id, timer), system(*system), memory(memory),
|
||||
cb(std::make_unique<DynarmicUserCallbacks>(*this)) {
|
||||
interpreter_state = std::make_shared<ARMul_State>(system, memory, initial_mode);
|
||||
SetPageTable(memory.GetCurrentPageTable());
|
||||
}
|
||||
|
|
|
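With the constructors now taking a core id and a `std::shared_ptr<Core::Timing::Timer>`, each CPU ticks its own timer instead of the single `Core::Timing` instance. A hedged sketch of how the cores might be wired up under that scheme; only the constructor signature is taken from this diff, and `timing.GetTimer(i)` is an assumed accessor:

```cpp
// Hypothetical wiring: one ARM_Dynarmic instance per core, each bound to its
// own timer so that AddTicks()/GetTicksRemaining() stay per-core.
std::vector<std::shared_ptr<ARM_Interface>> MakeCores(Core::System& system,
                                                      Memory::MemorySystem& memory,
                                                      Core::Timing& timing,
                                                      PrivilegeMode initial_mode, u32 num_cores) {
    std::vector<std::shared_ptr<ARM_Interface>> cores;
    cores.reserve(num_cores);
    for (u32 i = 0; i < num_cores; ++i) {
        std::shared_ptr<Core::Timing::Timer> timer = timing.GetTimer(i); // assumed accessor
        cores.push_back(
            std::make_shared<ARM_Dynarmic>(&system, memory, initial_mode, i, timer));
    }
    return cores;
}
```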
@@ -24,7 +24,8 @@ class DynarmicUserCallbacks;
 
 class ARM_Dynarmic final : public ARM_Interface {
 public:
-    ARM_Dynarmic(Core::System* system, Memory::MemorySystem& memory, PrivilegeMode initial_mode);
+    ARM_Dynarmic(Core::System* system, Memory::MemorySystem& memory, PrivilegeMode initial_mode,
+                 u32 id, std::shared_ptr<Core::Timing::Timer> timer);
     ~ARM_Dynarmic() override;
 
     void Run() override;
@ -69,8 +69,9 @@ private:
|
|||
};
|
||||
|
||||
ARM_DynCom::ARM_DynCom(Core::System* system, Memory::MemorySystem& memory,
|
||||
PrivilegeMode initial_mode)
|
||||
: system(system) {
|
||||
PrivilegeMode initial_mode, u32 id,
|
||||
std::shared_ptr<Core::Timing::Timer> timer)
|
||||
: ARM_Interface(id, timer), system(system) {
|
||||
state = std::make_unique<ARMul_State>(system, memory, initial_mode);
|
||||
}
|
||||
|
||||
|
@ -78,7 +79,7 @@ ARM_DynCom::~ARM_DynCom() {}
|
|||
|
||||
void ARM_DynCom::Run() {
|
||||
DEBUG_ASSERT(system != nullptr);
|
||||
ExecuteInstructions(std::max<s64>(system->CoreTiming().GetDowncount(), 0));
|
||||
ExecuteInstructions(std::max<s64>(timer->GetDowncount(), 0));
|
||||
}
|
||||
|
||||
void ARM_DynCom::Step() {
|
||||
|
@ -156,7 +157,7 @@ void ARM_DynCom::ExecuteInstructions(u64 num_instructions) {
|
|||
state->NumInstrsToExecute = num_instructions;
|
||||
unsigned ticks_executed = InterpreterMainLoop(state.get());
|
||||
if (system != nullptr) {
|
||||
system->CoreTiming().AddTicks(ticks_executed);
|
||||
timer->AddTicks(ticks_executed);
|
||||
}
|
||||
state->ServeBreak();
|
||||
}
|
||||
|
|
|
@ -21,7 +21,8 @@ class MemorySystem;
|
|||
class ARM_DynCom final : public ARM_Interface {
|
||||
public:
|
||||
explicit ARM_DynCom(Core::System* system, Memory::MemorySystem& memory,
|
||||
PrivilegeMode initial_mode);
|
||||
PrivilegeMode initial_mode, u32 id,
|
||||
std::shared_ptr<Core::Timing::Timer> timer);
|
||||
~ARM_DynCom() override;
|
||||
|
||||
void Run() override;
|
||||
|
|
|
@ -3865,7 +3865,7 @@ SWI_INST : {
|
|||
if (inst_base->cond == ConditionCode::AL || CondPassed(cpu, inst_base->cond)) {
|
||||
DEBUG_ASSERT(cpu->system != nullptr);
|
||||
swi_inst* const inst_cream = (swi_inst*)inst_base->component;
|
||||
cpu->system->CoreTiming().AddTicks(num_instrs);
|
||||
cpu->system->GetRunningCore().GetTimer()->AddTicks(num_instrs);
|
||||
cpu->NumInstrsToExecute =
|
||||
num_instrs >= cpu->NumInstrsToExecute ? 0 : cpu->NumInstrsToExecute - num_instrs;
|
||||
num_instrs = 0;
|
||||
@ -607,8 +607,8 @@ void ARMul_State::ServeBreak() {
|
|||
}
|
||||
|
||||
DEBUG_ASSERT(system != nullptr);
|
||||
Kernel::Thread* thread = system->Kernel().GetThreadManager().GetCurrentThread();
|
||||
system->CPU().SaveContext(thread->context);
|
||||
Kernel::Thread* thread = system->Kernel().GetCurrentThreadManager().GetCurrentThread();
|
||||
system->GetRunningCore().SaveContext(thread->context);
|
||||
|
||||
if (last_bkpt_hit || GDBStub::IsMemoryBreak() || GDBStub::GetCpuStepFlag()) {
|
||||
last_bkpt_hit = false;
|
||||
|
|
|
@ -35,7 +35,7 @@ static inline std::enable_if_t<std::is_integral_v<T>> WriteOp(const GatewayCheat
|
|||
Core::System& system) {
|
||||
u32 addr = line.address + state.offset;
|
||||
write_func(addr, static_cast<T>(line.value));
|
||||
system.CPU().InvalidateCacheRange(addr, sizeof(T));
|
||||
system.InvalidateCacheRange(addr, sizeof(T));
|
||||
}
|
||||
|
||||
template <typename T, typename ReadFunction, typename CompareFunc>
|
||||
|
@ -105,7 +105,7 @@ static inline std::enable_if_t<std::is_integral_v<T>> IncrementiveWriteOp(
|
|||
Core::System& system) {
|
||||
u32 addr = line.value + state.offset;
|
||||
write_func(addr, static_cast<T>(state.reg));
|
||||
system.CPU().InvalidateCacheRange(addr, sizeof(T));
|
||||
system.InvalidateCacheRange(addr, sizeof(T));
|
||||
state.offset += sizeof(T);
|
||||
}
|
||||
|
||||
|
@ -143,7 +143,8 @@ static inline void PatchOp(const GatewayCheat::CheatLine& line, State& state, Co
|
|||
}
|
||||
u32 num_bytes = line.value;
|
||||
u32 addr = line.address + state.offset;
|
||||
system.CPU().InvalidateCacheRange(addr, num_bytes);
|
||||
system.InvalidateCacheRange(addr, num_bytes);
|
||||
|
||||
bool first = true;
|
||||
u32 bit_offset = 0;
|
||||
if (num_bytes > 0)
|
||||
@ -5,6 +5,7 @@
|
|||
|
||||
#include <fstream>
|
||||
#include <memory>
|
||||
#include <stdexcept>
|
||||
#include <utility>
|
||||
#include <boost/serialization/array.hpp>
|
||||
#include "audio_core/dsp_interface.h"
|
||||
|
@ -65,7 +66,8 @@ System::~System() = default;
|
|||
|
||||
System::ResultStatus System::RunLoop(bool tight_loop) {
|
||||
status = ResultStatus::Success;
|
||||
if (!cpu_core) {
|
||||
if (std::any_of(cpu_cores.begin(), cpu_cores.end(),
|
||||
[](std::shared_ptr<ARM_Interface> ptr) { return ptr == nullptr; })) {
|
||||
return ResultStatus::ErrorNotInitialized;
|
||||
}
|
||||
|
||||
|
@ -83,21 +85,72 @@ System::ResultStatus System::RunLoop(bool tight_loop) {
|
|||
}
|
||||
}
|
||||
|
||||
// If we don't have a currently active thread then don't execute instructions,
// instead advance to the next event and try to yield to the next thread
if (kernel->GetThreadManager().GetCurrentThread() == nullptr) {
LOG_TRACE(Core_ARM11, "Idling");
timing->Idle();
timing->Advance();
// All cores should have executed the same amount of ticks. If this is not the case, an event was
// scheduled with a cycles_into_future smaller than the current downcount.
// So we have to get those cores to the same global time first
u64 global_ticks = timing->GetGlobalTicks();
s64 max_delay = 0;
std::shared_ptr<ARM_Interface> current_core_to_execute = nullptr;
for (auto& cpu_core : cpu_cores) {
if (cpu_core->GetTimer()->GetTicks() < global_ticks) {
s64 delay = global_ticks - cpu_core->GetTimer()->GetTicks();
cpu_core->GetTimer()->Advance(delay);
if (max_delay < delay) {
max_delay = delay;
current_core_to_execute = cpu_core;
}
}
}

if (max_delay > 0) {
LOG_TRACE(Core_ARM11, "Core {} running (delayed) for {} ticks",
current_core_to_execute->GetID(),
current_core_to_execute->GetTimer()->GetDowncount());
running_core = current_core_to_execute.get();
kernel->SetRunningCPU(current_core_to_execute);
if (kernel->GetCurrentThreadManager().GetCurrentThread() == nullptr) {
LOG_TRACE(Core_ARM11, "Core {} idling", current_core_to_execute->GetID());
current_core_to_execute->GetTimer()->Idle();
PrepareReschedule();
} else {
if (tight_loop) {
current_core_to_execute->Run();
} else {
current_core_to_execute->Step();
}
}
} else {
// Now all cores are at the same global time. So we will run them one after the other
// with a max slice that is the minimum of all max slices of all cores
// TODO: Make special check for idle since we can easily revert the time of idle cores
s64 max_slice = Timing::MAX_SLICE_LENGTH;
for (const auto& cpu_core : cpu_cores) {
max_slice = std::min(max_slice, cpu_core->GetTimer()->GetMaxSliceLength());
}
for (auto& cpu_core : cpu_cores) {
cpu_core->GetTimer()->Advance(max_slice);
}
for (auto& cpu_core : cpu_cores) {
LOG_TRACE(Core_ARM11, "Core {} running for {} ticks", cpu_core->GetID(),
cpu_core->GetTimer()->GetDowncount());
running_core = cpu_core.get();
kernel->SetRunningCPU(cpu_core);
// If we don't have a currently active thread then don't execute instructions,
// instead advance to the next event and try to yield to the next thread
if (kernel->GetCurrentThreadManager().GetCurrentThread() == nullptr) {
LOG_TRACE(Core_ARM11, "Core {} idling", cpu_core->GetID());
cpu_core->GetTimer()->Idle();
PrepareReschedule();
} else {
timing->Advance();
if (tight_loop) {
cpu_core->Run();
} else {
cpu_core->Step();
}
}
}
timing->AddToGlobalTicks(max_slice);
}
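The comments above describe a two-phase scheme: first bring any lagging core up to the shared global time (and let the most-delayed one run first), then advance every core in lock step by a common slice. A minimal standalone sketch of the catch-up phase, using toy structs instead of the real ARM_Interface/Timer classes (all names here are illustrative, not Citra APIs):

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

// Toy model: each core owns a tick counter; the scheduler keeps a global tick count.
struct ToyCore {
    int id;
    std::int64_t ticks; // how far this core has executed
};

int main() {
    std::vector<ToyCore> cores{{0, 1000}, {1, 940}, {2, 1000}, {3, 980}};
    const std::int64_t global_ticks = 1000;

    // Catch-up pass: advance every lagging core and remember the one furthest behind.
    std::int64_t max_delay = 0;
    ToyCore* laggard = nullptr;
    for (auto& core : cores) {
        if (core.ticks < global_ticks) {
            const std::int64_t delay = global_ticks - core.ticks;
            core.ticks += delay; // corresponds to GetTimer()->Advance(delay)
            if (delay > max_delay) {
                max_delay = delay;
                laggard = &core;
            }
        }
    }

    if (laggard != nullptr) {
        // The most-delayed core gets to run before the lock-step phase begins.
        std::printf("core %d was %lld ticks behind and runs first\n", laggard->id,
                    static_cast<long long>(max_delay));
    } else {
        std::printf("all cores already at the global time; run them in lock step\n");
    }
}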
|
||||
|
||||
if (GDBStub::IsServerEnabled()) {
|
||||
GDBStub::SetCpuStepFlag(false);
|
||||
|
@ -183,7 +236,9 @@ System::ResultStatus System::Load(Frontend::EmuWindow& emu_window, const std::st
|
|||
}
|
||||
|
||||
ASSERT(system_mode.first);
|
||||
ResultStatus init_result{Init(emu_window, *system_mode.first)};
|
||||
auto n3ds_mode = app_loader->LoadKernelN3dsMode();
|
||||
ASSERT(n3ds_mode.first);
|
||||
ResultStatus init_result{Init(emu_window, *system_mode.first, *n3ds_mode.first)};
|
||||
if (init_result != ResultStatus::Success) {
|
||||
LOG_CRITICAL(Core, "Failed to initialize system (Error {})!",
|
||||
static_cast<u32>(init_result));
|
||||
|
@ -235,7 +290,7 @@ System::ResultStatus System::Load(Frontend::EmuWindow& emu_window, const std::st
|
|||
}
|
||||
|
||||
void System::PrepareReschedule() {
|
||||
cpu_core->PrepareReschedule();
|
||||
running_core->PrepareReschedule();
|
||||
reschedule_pending = true;
|
||||
}
|
||||
@ -249,31 +304,50 @@ void System::Reschedule() {
|
|||
}
|
||||
|
||||
reschedule_pending = false;
|
||||
kernel->GetThreadManager().Reschedule();
|
||||
for (const auto& core : cpu_cores) {
|
||||
LOG_TRACE(Core_ARM11, "Reschedule core {}", core->GetID());
|
||||
kernel->GetThreadManager(core->GetID()).Reschedule();
|
||||
}
|
||||
}
|
||||
|
||||
System::ResultStatus System::Init(Frontend::EmuWindow& emu_window, u32 system_mode) {
|
||||
System::ResultStatus System::Init(Frontend::EmuWindow& emu_window, u32 system_mode, u8 n3ds_mode) {
|
||||
LOG_DEBUG(HW_Memory, "initialized OK");
|
||||
|
||||
std::size_t num_cores = 2;
|
||||
if (Settings::values.is_new_3ds) {
|
||||
num_cores = 4;
|
||||
}
|
||||
|
||||
memory = std::make_unique<Memory::MemorySystem>();
|
||||
|
||||
timing = std::make_unique<Timing>();
|
||||
timing = std::make_unique<Timing>(num_cores);
|
||||
|
||||
kernel = std::make_unique<Kernel::KernelSystem>(*memory, *timing,
|
||||
[this] { PrepareReschedule(); }, system_mode);
|
||||
kernel = std::make_unique<Kernel::KernelSystem>(
|
||||
*memory, *timing, [this] { PrepareReschedule(); }, system_mode, num_cores, n3ds_mode);
|
||||
|
||||
if (Settings::values.use_cpu_jit) {
|
||||
#ifdef ARCHITECTURE_x86_64
|
||||
cpu_core = std::make_shared<ARM_Dynarmic>(this, *memory, USER32MODE);
|
||||
for (std::size_t i = 0; i < num_cores; ++i) {
|
||||
cpu_cores.push_back(
|
||||
std::make_shared<ARM_Dynarmic>(this, *memory, USER32MODE, i, timing->GetTimer(i)));
|
||||
}
|
||||
#else
|
||||
cpu_core = std::make_shared<ARM_DynCom>(this, *memory, USER32MODE);
|
||||
for (std::size_t i = 0; i < num_cores; ++i) {
|
||||
cpu_cores.push_back(
|
||||
std::make_shared<ARM_DynCom>(this, *memory, USER32MODE, i, timing->GetTimer(i)));
|
||||
}
|
||||
LOG_WARNING(Core, "CPU JIT requested, but Dynarmic not available");
|
||||
#endif
|
||||
} else {
|
||||
cpu_core = std::make_shared<ARM_DynCom>(this, *memory, USER32MODE);
|
||||
for (std::size_t i = 0; i < num_cores; ++i) {
|
||||
cpu_cores.push_back(
|
||||
std::make_shared<ARM_DynCom>(this, *memory, USER32MODE, i, timing->GetTimer(i)));
|
||||
}
|
||||
}
|
||||
running_core = cpu_cores[0].get();
|
||||
|
||||
kernel->SetCPU(cpu_core);
|
||||
kernel->SetCPUs(cpu_cores);
|
||||
kernel->SetRunningCPU(cpu_cores[0]);
|
||||
|
||||
if (Settings::values.enable_dsp_lle) {
|
||||
dsp_core = std::make_unique<AudioCore::DspLle>(*memory,
|
||||
|
@ -296,7 +370,7 @@ System::ResultStatus System::Init(Frontend::EmuWindow& emu_window, u32 system_mo
|
|||
|
||||
HW::Init(*memory);
|
||||
Service::Init(*this);
|
||||
GDBStub::Init();
|
||||
GDBStub::DeferStart();
|
||||
|
||||
VideoCore::ResultStatus result = VideoCore::Init(emu_window, *memory);
|
||||
if (result != VideoCore::ResultStatus::Success) {
|
||||
|
@ -318,6 +392,8 @@ System::ResultStatus System::Init(Frontend::EmuWindow& emu_window, u32 system_mo
|
|||
|
||||
LOG_DEBUG(Core, "Initialized OK");
|
||||
|
||||
initalized = true;
|
||||
|
||||
return ResultStatus::Success;
|
||||
}
|
||||
|
||||
|
@ -421,9 +497,10 @@ void System::Shutdown() {
|
|||
perf_stats.reset();
|
||||
rpc_server.reset();
|
||||
cheat_engine.reset();
|
||||
archive_manager.reset();
|
||||
service_manager.reset();
|
||||
dsp_core.reset();
|
||||
cpu_core.reset();
|
||||
cpu_cores.clear();
|
||||
kernel.reset();
|
||||
timing.reset();
|
||||
app_loader.reset();
|
||||
|
@ -452,11 +529,18 @@ void System::Reset() {
|
|||
|
||||
template <class Archive>
|
||||
void System::serialize(Archive& ar, const unsigned int file_version) {
|
||||
u32 num_cores;
|
||||
ar& num_cores;
|
||||
if (num_cores != this->GetNumCores()) {
|
||||
throw std::runtime_error("Wrong N3DS mode");
|
||||
}
|
||||
// flush on save, don't flush on load
|
||||
bool should_flush = !Archive::is_loading::value;
|
||||
Memory::RasterizerClearAll(should_flush);
|
||||
ar&* timing.get();
|
||||
ar&* cpu_core.get();
|
||||
for (int i = 0; i < num_cores; i++) {
|
||||
ar&* cpu_cores[i].get();
|
||||
}
|
||||
ar&* service_manager.get();
|
||||
ar& GPU::g_regs;
|
||||
ar& LCD::g_regs;
|
||||
|
|
|
@ -148,7 +148,10 @@ public:
|
|||
* @returns True if the emulated system is powered on, otherwise false.
|
||||
*/
|
||||
bool IsPoweredOn() const {
|
||||
return cpu_core != nullptr;
|
||||
return cpu_cores.size() > 0 &&
       std::all_of(cpu_cores.begin(), cpu_cores.end(),
                   [](std::shared_ptr<ARM_Interface> ptr) { return ptr != nullptr; });
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -168,8 +171,29 @@ public:
|
|||
* Gets a reference to the emulated CPU.
|
||||
* @returns A reference to the emulated CPU.
|
||||
*/
|
||||
ARM_Interface& CPU() {
|
||||
return *cpu_core;
|
||||
|
||||
ARM_Interface& GetRunningCore() {
|
||||
return *running_core;
|
||||
};
|
||||
|
||||
/**
|
||||
* Gets a reference to the emulated CPU.
|
||||
* @param core_id The id of the core requested.
|
||||
* @returns A reference to the emulated CPU.
|
||||
*/
|
||||
|
||||
ARM_Interface& GetCore(u32 core_id) {
|
||||
return *cpu_cores[core_id];
|
||||
};
|
||||
|
||||
u32 GetNumCores() const {
|
||||
return static_cast<u32>(cpu_cores.size());
|
||||
}
|
||||
|
||||
void InvalidateCacheRange(u32 start_address, std::size_t length) {
|
||||
for (const auto& cpu : cpu_cores) {
|
||||
cpu->InvalidateCacheRange(start_address, length);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -291,7 +315,7 @@ private:
|
|||
* @param system_mode The system mode.
|
||||
* @return ResultStatus code, indicating if the operation succeeded.
|
||||
*/
|
||||
ResultStatus Init(Frontend::EmuWindow& emu_window, u32 system_mode);
|
||||
ResultStatus Init(Frontend::EmuWindow& emu_window, u32 system_mode, u8 n3ds_mode);
|
||||
|
||||
/// Reschedule the core emulation
|
||||
void Reschedule();
|
||||
|
@ -300,7 +324,8 @@ private:
|
|||
std::unique_ptr<Loader::AppLoader> app_loader;
|
||||
|
||||
/// ARM11 CPU core
|
||||
std::shared_ptr<ARM_Interface> cpu_core;
|
||||
std::vector<std::shared_ptr<ARM_Interface>> cpu_cores;
|
||||
ARM_Interface* running_core = nullptr;
|
||||
|
||||
/// DSP core
|
||||
std::unique_ptr<AudioCore::DspInterface> dsp_core;
|
||||
|
@ -342,6 +367,8 @@ private:
|
|||
private:
|
||||
static System s_instance;
|
||||
|
||||
bool initalized = false;
|
||||
|
||||
ResultStatus status = ResultStatus::Success;
|
||||
std::string status_details = "";
|
||||
/// Saved variables for reset
|
||||
|
@ -358,8 +385,16 @@ private:
|
|||
void serialize(Archive& ar, const unsigned int file_version);
|
||||
};
|
||||
|
||||
inline ARM_Interface& CPU() {
|
||||
return System::GetInstance().CPU();
|
||||
inline ARM_Interface& GetRunningCore() {
|
||||
return System::GetInstance().GetRunningCore();
|
||||
}
|
||||
|
||||
inline ARM_Interface& GetCore(u32 core_id) {
|
||||
return System::GetInstance().GetCore(core_id);
|
||||
}
|
||||
|
||||
inline u32 GetNumCores() {
|
||||
return System::GetInstance().GetNumCores();
|
||||
}
|
||||
|
||||
inline AudioCore::DspInterface& DSP() {
|
||||
|
|
|
@ -14,14 +14,22 @@ namespace Core {
|
|||
Timing* Timing::deserializing = nullptr;
|
||||
|
||||
// Sort by time, unless the times are the same, in which case sort by the order added to the queue
|
||||
bool Timing::Event::operator>(const Event& right) const {
|
||||
bool Timing::Event::operator>(const Timing::Event& right) const {
|
||||
return std::tie(time, fifo_order) > std::tie(right.time, right.fifo_order);
|
||||
}
|
||||
|
||||
bool Timing::Event::operator<(const Event& right) const {
|
||||
bool Timing::Event::operator<(const Timing::Event& right) const {
|
||||
return std::tie(time, fifo_order) < std::tie(right.time, right.fifo_order);
|
||||
}
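The two operators order events primarily by time and fall back to insertion order (fifo_order) when two events share a timestamp; std::tie gives exactly that lexicographic comparison. A small self-contained check of the tie-break behaviour, with a simplified Event stand-in (not the real struct):

#include <cassert>
#include <cstdint>
#include <tuple>

struct ToyEvent {
    std::int64_t time;
    std::uint64_t fifo_order;
};

// Same shape as Timing::Event::operator>: compare (time, fifo_order) lexicographically.
bool later(const ToyEvent& a, const ToyEvent& b) {
    return std::tie(a.time, a.fifo_order) > std::tie(b.time, b.fifo_order);
}

int main() {
    ToyEvent first{100, 0};
    ToyEvent second{100, 1}; // same time, scheduled later
    ToyEvent third{200, 2};

    assert(later(third, first));   // a later time wins regardless of fifo_order
    assert(later(second, first));  // equal times: the one added later is "greater"
    assert(!later(first, second)); // so the min-heap pops 'first' before 'second'
}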
|
||||
|
||||
Timing::Timing(std::size_t num_cores) {
|
||||
timers.resize(num_cores);
|
||||
for (std::size_t i = 0; i < num_cores; ++i) {
|
||||
timers[i] = std::make_shared<Timer>();
|
||||
}
|
||||
current_timer = timers[0];
|
||||
}
|
||||
|
||||
TimingEventType* Timing::RegisterEvent(const std::string& name, TimedCallback callback) {
|
||||
// check for existing type with same name.
|
||||
// we want event type names to remain unique so that we can use them for serialization.
|
||||
|
@ -34,73 +42,102 @@ TimingEventType* Timing::RegisterEvent(const std::string& name, TimedCallback ca
|
|||
return event_type;
|
||||
}
|
||||
|
||||
Timing::~Timing() {
|
||||
void Timing::ScheduleEvent(s64 cycles_into_future, const TimingEventType* event_type, u64 userdata,
|
||||
std::size_t core_id) {
|
||||
ASSERT(event_type != nullptr);
|
||||
std::shared_ptr<Timing::Timer> timer;
|
||||
if (core_id == std::numeric_limits<std::size_t>::max()) {
|
||||
timer = current_timer;
|
||||
} else {
|
||||
ASSERT(core_id < timers.size());
|
||||
timer = timers.at(core_id);
|
||||
}
|
||||
|
||||
s64 timeout = timer->GetTicks() + cycles_into_future;
|
||||
if (current_timer == timer) {
|
||||
// If this event needs to be scheduled before the next advance(), force one early
|
||||
if (!timer->is_timer_sane)
|
||||
timer->ForceExceptionCheck(cycles_into_future);
|
||||
|
||||
timer->event_queue.emplace_back(
|
||||
Event{timeout, timer->event_fifo_id++, userdata, event_type});
|
||||
std::push_heap(timer->event_queue.begin(), timer->event_queue.end(), std::greater<>());
|
||||
} else {
|
||||
timer->ts_queue.Push(Event{static_cast<s64>(timer->GetTicks() + cycles_into_future), 0,
|
||||
userdata, event_type});
|
||||
}
|
||||
}
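The reworked ScheduleEvent keeps the old single-timer behaviour as the default (core_id defaults to the size_t maximum, meaning "the current timer") and adds explicit per-core targeting; scheduling onto a timer other than the current one goes through the thread-safe ts_queue. A hedged usage sketch: the function name below is made up, the include path and the RegisterEvent/ScheduleEvent signatures are taken from this diff, and the callback parameter list (userdata, cycles_late) is inferred from how Timer::Advance() invokes callbacks, so treat it as an assumption.

#include <cstdio>
#include "core/core_timing.h"

void ScheduleExampleEvents(Core::Timing& timing) {
    // Register a named event type once; the name must be unique.
    Core::TimingEventType* event = timing.RegisterEvent(
        "ExampleEvent", [](u64 userdata, s64 cycles_late) {
            std::printf("fired: userdata=%llu, %lld cycles late\n",
                        static_cast<unsigned long long>(userdata),
                        static_cast<long long>(cycles_late));
        });

    // Default core_id: queue on whichever timer is currently selected.
    timing.ScheduleEvent(10000, event, /*userdata=*/0);

    // Explicit core_id: queue on core 1's timer; if that timer is not the current
    // one, the event is pushed onto its ts_queue and folded in by MoveEvents().
    timing.ScheduleEvent(10000, event, /*userdata=*/1, /*core_id=*/1);
}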
|
||||
|
||||
void Timing::UnscheduleEvent(const TimingEventType* event_type, u64 userdata) {
|
||||
for (auto timer : timers) {
|
||||
auto itr = std::remove_if(
|
||||
timer->event_queue.begin(), timer->event_queue.end(),
|
||||
[&](const Event& e) { return e.type == event_type && e.userdata == userdata; });
|
||||
|
||||
// Removing random items breaks the invariant so we have to re-establish it.
|
||||
if (itr != timer->event_queue.end()) {
|
||||
timer->event_queue.erase(itr, timer->event_queue.end());
|
||||
std::make_heap(timer->event_queue.begin(), timer->event_queue.end(), std::greater<>());
|
||||
}
|
||||
}
|
||||
// TODO: remove events from ts_queue
|
||||
}
|
||||
|
||||
void Timing::RemoveEvent(const TimingEventType* event_type) {
|
||||
for (auto timer : timers) {
|
||||
auto itr = std::remove_if(timer->event_queue.begin(), timer->event_queue.end(),
|
||||
[&](const Event& e) { return e.type == event_type; });
|
||||
|
||||
// Removing random items breaks the invariant so we have to re-establish it.
|
||||
if (itr != timer->event_queue.end()) {
|
||||
timer->event_queue.erase(itr, timer->event_queue.end());
|
||||
std::make_heap(timer->event_queue.begin(), timer->event_queue.end(), std::greater<>());
|
||||
}
|
||||
}
|
||||
// TODO: remove events from ts_queue
|
||||
}
|
||||
|
||||
void Timing::SetCurrentTimer(std::size_t core_id) {
|
||||
current_timer = timers[core_id];
|
||||
}
|
||||
|
||||
s64 Timing::GetTicks() const {
|
||||
return current_timer->GetTicks();
|
||||
}
|
||||
|
||||
s64 Timing::GetGlobalTicks() const {
|
||||
return global_timer;
|
||||
}
|
||||
|
||||
std::chrono::microseconds Timing::GetGlobalTimeUs() const {
|
||||
return std::chrono::microseconds{GetTicks() * 1000000 / BASE_CLOCK_RATE_ARM11};
|
||||
}
|
||||
|
||||
std::shared_ptr<Timing::Timer> Timing::GetTimer(std::size_t cpu_id) {
|
||||
return timers[cpu_id];
|
||||
}
|
||||
|
||||
Timing::Timer::~Timer() {
|
||||
MoveEvents();
|
||||
}
|
||||
|
||||
u64 Timing::GetTicks() const {
|
||||
u64 ticks = static_cast<u64>(global_timer);
|
||||
if (!is_global_timer_sane) {
|
||||
u64 Timing::Timer::GetTicks() const {
|
||||
u64 ticks = static_cast<u64>(executed_ticks);
|
||||
if (!is_timer_sane) {
|
||||
ticks += slice_length - downcount;
|
||||
}
|
||||
return ticks;
|
||||
}
|
||||
|
||||
void Timing::AddTicks(u64 ticks) {
|
||||
void Timing::Timer::AddTicks(u64 ticks) {
|
||||
downcount -= ticks;
|
||||
}
|
||||
|
||||
u64 Timing::GetIdleTicks() const {
|
||||
u64 Timing::Timer::GetIdleTicks() const {
|
||||
return static_cast<u64>(idled_cycles);
|
||||
}
|
||||
|
||||
void Timing::ScheduleEvent(s64 cycles_into_future, const TimingEventType* event_type,
|
||||
u64 userdata) {
|
||||
ASSERT(event_type != nullptr);
|
||||
s64 timeout = GetTicks() + cycles_into_future;
|
||||
|
||||
// If this event needs to be scheduled before the next advance(), force one early
|
||||
if (!is_global_timer_sane)
|
||||
ForceExceptionCheck(cycles_into_future);
|
||||
|
||||
event_queue.emplace_back(Event{timeout, event_fifo_id++, userdata, event_type});
|
||||
std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
|
||||
}
|
||||
|
||||
void Timing::ScheduleEventThreadsafe(s64 cycles_into_future, const TimingEventType* event_type,
|
||||
u64 userdata) {
|
||||
ts_queue.Push(Event{global_timer + cycles_into_future, 0, userdata, event_type});
|
||||
}
|
||||
|
||||
void Timing::UnscheduleEvent(const TimingEventType* event_type, u64 userdata) {
|
||||
auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
|
||||
return e.type == event_type && e.userdata == userdata;
|
||||
});
|
||||
|
||||
// Removing random items breaks the invariant so we have to re-establish it.
|
||||
if (itr != event_queue.end()) {
|
||||
event_queue.erase(itr, event_queue.end());
|
||||
std::make_heap(event_queue.begin(), event_queue.end(), std::greater<>());
|
||||
}
|
||||
}
|
||||
|
||||
void Timing::RemoveEvent(const TimingEventType* event_type) {
|
||||
auto itr = std::remove_if(event_queue.begin(), event_queue.end(),
|
||||
[&](const Event& e) { return e.type == event_type; });
|
||||
|
||||
// Removing random items breaks the invariant so we have to re-establish it.
|
||||
if (itr != event_queue.end()) {
|
||||
event_queue.erase(itr, event_queue.end());
|
||||
std::make_heap(event_queue.begin(), event_queue.end(), std::greater<>());
|
||||
}
|
||||
}
|
||||
|
||||
void Timing::RemoveNormalAndThreadsafeEvent(const TimingEventType* event_type) {
|
||||
MoveEvents();
|
||||
RemoveEvent(event_type);
|
||||
}
|
||||
|
||||
void Timing::ForceExceptionCheck(s64 cycles) {
|
||||
void Timing::Timer::ForceExceptionCheck(s64 cycles) {
|
||||
cycles = std::max<s64>(0, cycles);
|
||||
if (downcount > cycles) {
|
||||
slice_length -= downcount - cycles;
|
||||
|
@ -108,7 +145,7 @@ void Timing::ForceExceptionCheck(s64 cycles) {
|
|||
}
|
||||
}
|
||||
|
||||
void Timing::MoveEvents() {
|
||||
void Timing::Timer::MoveEvents() {
|
||||
for (Event ev; ts_queue.Pop(ev);) {
|
||||
ev.fifo_order = event_fifo_id++;
|
||||
event_queue.emplace_back(std::move(ev));
|
||||
|
@ -116,50 +153,54 @@ void Timing::MoveEvents() {
|
|||
}
|
||||
}
|
||||
|
||||
void Timing::Advance() {
|
||||
s64 Timing::Timer::GetMaxSliceLength() const {
|
||||
auto next_event = std::find_if(event_queue.begin(), event_queue.end(),
|
||||
[&](const Event& e) { return e.time - executed_ticks > 0; });
|
||||
if (next_event != event_queue.end()) {
|
||||
return next_event->time - executed_ticks;
|
||||
}
|
||||
return MAX_SLICE_LENGTH;
|
||||
}
|
||||
|
||||
void Timing::Timer::Advance(s64 max_slice_length) {
|
||||
MoveEvents();
|
||||
|
||||
s64 cycles_executed = slice_length - downcount;
|
||||
global_timer += cycles_executed;
|
||||
slice_length = MAX_SLICE_LENGTH;
|
||||
idled_cycles = 0;
|
||||
executed_ticks += cycles_executed;
|
||||
slice_length = max_slice_length;
|
||||
|
||||
is_global_timer_sane = true;
|
||||
is_timer_sane = true;
|
||||
|
||||
while (!event_queue.empty() && event_queue.front().time <= global_timer) {
|
||||
while (!event_queue.empty() && event_queue.front().time <= executed_ticks) {
|
||||
Event evt = std::move(event_queue.front());
|
||||
std::pop_heap(event_queue.begin(), event_queue.end(), std::greater<>());
|
||||
event_queue.pop_back();
|
||||
if (event_types.find(*evt.type->name) == event_types.end()) {
|
||||
LOG_ERROR(Core, "Unknown queued event {}", *evt.type->name);
|
||||
} else if (evt.type->callback == nullptr) {
|
||||
if (evt.type->callback == nullptr) {
|
||||
LOG_ERROR(Core, "Event '{}' has no callback", *evt.type->name);
|
||||
}
|
||||
if (evt.type->callback != nullptr) {
|
||||
evt.type->callback(evt.userdata, global_timer - evt.time);
|
||||
evt.type->callback(evt.userdata, executed_ticks - evt.time);
|
||||
}
|
||||
}
|
||||
|
||||
is_global_timer_sane = false;
|
||||
is_timer_sane = false;
|
||||
|
||||
// Still events left (scheduled in the future)
|
||||
if (!event_queue.empty()) {
|
||||
slice_length = static_cast<int>(
|
||||
std::min<s64>(event_queue.front().time - global_timer, MAX_SLICE_LENGTH));
|
||||
std::min<s64>(event_queue.front().time - executed_ticks, max_slice_length));
|
||||
}
|
||||
|
||||
downcount = slice_length;
|
||||
}
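Between slice boundaries, GetTicks() reports executed_ticks plus the part of the current slice that has already run (slice_length - downcount), which is why AddTicks() only decrements downcount and Advance() folds the executed portion back into executed_ticks. A toy walk-through of that bookkeeping with made-up numbers (a stand-in for Timing::Timer, illustrative only):

#include <cassert>
#include <cstdint>

struct ToyTimer {
    std::int64_t executed_ticks = 0;
    std::int64_t slice_length = 20000; // MAX_SLICE_LENGTH in the real code
    std::int64_t downcount = 20000;
    bool mid_slice = false; // inverse of is_timer_sane

    std::uint64_t GetTicks() const {
        std::uint64_t ticks = static_cast<std::uint64_t>(executed_ticks);
        if (mid_slice) {
            ticks += slice_length - downcount; // progress inside the current slice
        }
        return ticks;
    }

    void AddTicks(std::uint64_t ticks) { downcount -= static_cast<std::int64_t>(ticks); }

    void EndSlice() { // roughly what Advance() does with the executed portion
        executed_ticks += slice_length - downcount;
        downcount = slice_length;
        mid_slice = false;
    }
};

int main() {
    ToyTimer timer;
    timer.mid_slice = true;
    timer.AddTicks(1500);             // the CPU reports 1500 executed cycles
    assert(timer.GetTicks() == 1500); // visible immediately via the downcount delta
    timer.EndSlice();                 // folded into executed_ticks at the slice boundary
    assert(timer.GetTicks() == 1500);
}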
|
||||
|
||||
void Timing::Idle() {
|
||||
void Timing::Timer::Idle() {
|
||||
idled_cycles += downcount;
|
||||
downcount = 0;
|
||||
}
|
||||
|
||||
std::chrono::microseconds Timing::GetGlobalTimeUs() const {
|
||||
return std::chrono::microseconds{GetTicks() * 1000000 / BASE_CLOCK_RATE_ARM11};
|
||||
}
|
||||
|
||||
s64 Timing::GetDowncount() const {
|
||||
s64 Timing::Timer::GetDowncount() const {
|
||||
return downcount;
|
||||
}
|
||||
|
||||
|
|
|
@ -135,65 +135,10 @@ struct TimingEventType {
|
|||
};
|
||||
|
||||
class Timing {
|
||||
public:
|
||||
~Timing();
|
||||
|
||||
/**
|
||||
* This should only be called from the emu thread, if you are calling it any other thread, you
|
||||
* are doing something evil
|
||||
*/
|
||||
u64 GetTicks() const;
|
||||
u64 GetIdleTicks() const;
|
||||
void AddTicks(u64 ticks);
|
||||
|
||||
/**
|
||||
* Returns the event_type identifier. If the name is not unique, it will assert.
|
||||
*/
|
||||
TimingEventType* RegisterEvent(const std::string& name, TimedCallback callback);
|
||||
|
||||
/**
|
||||
* After the first Advance, the slice lengths and the downcount will be reduced whenever an
|
||||
* event is scheduled earlier than the current values. Scheduling from a callback will not
|
||||
* update the downcount until the Advance() completes.
|
||||
*/
|
||||
void ScheduleEvent(s64 cycles_into_future, const TimingEventType* event_type, u64 userdata = 0);
|
||||
|
||||
/**
|
||||
* This is to be called when outside of hle threads, such as the graphics thread, wants to
|
||||
* schedule things to be executed on the main thread.
|
||||
* Note that this doesn't change slice_length, and thus events scheduled by this might be called
|
||||
* with a delay of up to MAX_SLICE_LENGTH
|
||||
*/
|
||||
void ScheduleEventThreadsafe(s64 cycles_into_future, const TimingEventType* event_type,
|
||||
u64 userdata);
|
||||
|
||||
void UnscheduleEvent(const TimingEventType* event_type, u64 userdata);
|
||||
|
||||
/// We only permit one event of each type in the queue at a time.
|
||||
void RemoveEvent(const TimingEventType* event_type);
|
||||
void RemoveNormalAndThreadsafeEvent(const TimingEventType* event_type);
|
||||
|
||||
/** Advance must be called at the beginning of dispatcher loops, not the end. Advance() ends
|
||||
* the previous timing slice and begins the next one, you must Advance from the previous
|
||||
* slice to the current one before executing any cycles. CoreTiming starts in slice -1 so an
|
||||
* Advance() is required to initialize the slice length before the first cycle of emulated
|
||||
* instructions is executed.
|
||||
*/
|
||||
void Advance();
|
||||
void MoveEvents();
|
||||
|
||||
/// Pretend that the main CPU has executed enough cycles to reach the next event.
|
||||
void Idle();
|
||||
|
||||
void ForceExceptionCheck(s64 cycles);
|
||||
|
||||
std::chrono::microseconds GetGlobalTimeUs() const;
|
||||
|
||||
s64 GetDowncount() const;
|
||||
|
||||
private:
|
||||
static Timing* deserializing;
|
||||
|
||||
public:
|
||||
struct Event {
|
||||
s64 time;
|
||||
u64 fifo_order;
|
||||
|
@ -229,14 +174,29 @@ private:
|
|||
|
||||
static constexpr int MAX_SLICE_LENGTH = 20000;
|
||||
|
||||
s64 global_timer = 0;
|
||||
s64 slice_length = MAX_SLICE_LENGTH;
|
||||
s64 downcount = MAX_SLICE_LENGTH;
|
||||
class Timer {
|
||||
public:
|
||||
~Timer();
|
||||
|
||||
// unordered_map stores each element separately as a linked list node so pointers to
|
||||
// elements remain stable regardless of rehashes/resizing.
|
||||
std::unordered_map<std::string, TimingEventType> event_types;
|
||||
s64 GetMaxSliceLength() const;
|
||||
|
||||
void Advance(s64 max_slice_length = MAX_SLICE_LENGTH);
|
||||
|
||||
void Idle();
|
||||
|
||||
u64 GetTicks() const;
|
||||
u64 GetIdleTicks() const;
|
||||
|
||||
void AddTicks(u64 ticks);
|
||||
|
||||
s64 GetDowncount() const;
|
||||
|
||||
void ForceExceptionCheck(s64 cycles);
|
||||
|
||||
void MoveEvents();
|
||||
|
||||
private:
|
||||
friend class Timing;
|
||||
// The queue is a min-heap using std::make_heap/push_heap/pop_heap.
|
||||
// We don't use std::priority_queue because we need to be able to serialize, unserialize and
|
||||
// erase arbitrary events (RemoveEvent()) regardless of the queue order. These aren't
|
||||
|
@ -246,31 +206,84 @@ private:
|
|||
// the queue for storing the events from other threads threadsafe until they will be added
|
||||
// to the event_queue by the emu thread
|
||||
Common::MPSCQueue<Event> ts_queue;
|
||||
s64 idled_cycles = 0;
|
||||
|
||||
// Are we in a function that has been called from Advance()
|
||||
// If events are scheduled from a function that gets called from Advance(),
|
||||
// don't change slice_length and downcount.
|
||||
// The time between CoreTiming being initialized and the first call to Advance() is considered
|
||||
// the slice boundary between slice -1 and slice 0. Dispatcher loops must call Advance() before
|
||||
// executing the first cycle of each slice to prepare the slice length and downcount for
|
||||
// that slice.
|
||||
bool is_global_timer_sane = true;
|
||||
// The time between CoreTiming being initialized and the first call to Advance() is
|
||||
// considered the slice boundary between slice -1 and slice 0. Dispatcher loops must call
|
||||
// Advance() before executing the first cycle of each slice to prepare the slice length and
|
||||
// downcount for that slice.
|
||||
bool is_timer_sane = true;
|
||||
|
||||
s64 slice_length = MAX_SLICE_LENGTH;
|
||||
s64 downcount = MAX_SLICE_LENGTH;
|
||||
s64 executed_ticks = 0;
|
||||
u64 idled_cycles;
|
||||
|
||||
template <class Archive>
|
||||
void serialize(Archive& ar, const unsigned int) {
|
||||
// event_types set during initialization of other things
|
||||
deserializing = this;
|
||||
MoveEvents();
|
||||
ar& global_timer;
|
||||
ar& slice_length;
|
||||
ar& downcount;
|
||||
ar& event_queue;
|
||||
ar& event_fifo_id;
|
||||
ar& idled_cycles;
|
||||
deserializing = nullptr;
|
||||
}
|
||||
friend class boost::serialization::access;
|
||||
};
|
||||
|
||||
explicit Timing(std::size_t num_cores);
|
||||
|
||||
~Timing() {}
|
||||
|
||||
/**
|
||||
* Returns the event_type identifier. If the name is not unique, it will assert.
|
||||
*/
|
||||
TimingEventType* RegisterEvent(const std::string& name, TimedCallback callback);
|
||||
|
||||
void ScheduleEvent(s64 cycles_into_future, const TimingEventType* event_type, u64 userdata = 0,
|
||||
std::size_t core_id = std::numeric_limits<std::size_t>::max());
|
||||
|
||||
void UnscheduleEvent(const TimingEventType* event_type, u64 userdata);
|
||||
|
||||
/// We only permit one event of each type in the queue at a time.
|
||||
void RemoveEvent(const TimingEventType* event_type);
|
||||
|
||||
void SetCurrentTimer(std::size_t core_id);
|
||||
|
||||
s64 GetTicks() const;
|
||||
|
||||
s64 GetGlobalTicks() const;
|
||||
|
||||
void AddToGlobalTicks(s64 ticks) {
|
||||
global_timer += ticks;
|
||||
}
|
||||
|
||||
std::chrono::microseconds GetGlobalTimeUs() const;
|
||||
|
||||
std::shared_ptr<Timer> GetTimer(std::size_t cpu_id);
|
||||
|
||||
private:
|
||||
s64 global_timer = 0;
|
||||
|
||||
// unordered_map stores each element separately as a linked list node so pointers to
|
||||
// elements remain stable regardless of rehashes/resizing.
|
||||
std::unordered_map<std::string, TimingEventType> event_types;
|
||||
|
||||
std::vector<std::shared_ptr<Timer>> timers;
|
||||
std::shared_ptr<Timer> current_timer;
|
||||
|
||||
template <class Archive>
|
||||
void serialize(Archive& ar, const unsigned int) {
|
||||
// event_types set during initialization of other things
|
||||
deserializing = this;
|
||||
ar& global_timer;
|
||||
ar& timers;
|
||||
ar& current_timer;
|
||||
deserializing = nullptr;
|
||||
}
|
||||
friend class boost::serialization::access;
|
||||
|
||||
};
|
||||
|
||||
} // namespace Core
|
||||
|
|
|
@ -0,0 +1,604 @@
|
|||
// Copyright 2020 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <algorithm>
|
||||
#include <cstring>
|
||||
#include "common/alignment.h"
|
||||
#include "common/assert.h"
|
||||
#include "common/common_paths.h"
|
||||
#include "common/file_util.h"
|
||||
#include "common/string_util.h"
|
||||
#include "common/swap.h"
|
||||
#include "core/file_sys/layered_fs.h"
|
||||
#include "core/file_sys/patch.h"
|
||||
|
||||
namespace FileSys {
|
||||
|
||||
struct FileRelocationInfo {
|
||||
int type; // 0 - none, 1 - replaced / created, 2 - patched, 3 - removed
|
||||
u64 original_offset; // Type 0. Offset is absolute
|
||||
std::string replace_file_path; // Type 1
|
||||
std::vector<u8> patched_file; // Type 2
|
||||
u64 size; // Relocated file size
|
||||
};
|
||||
struct LayeredFS::File {
|
||||
std::string name;
|
||||
std::string path;
|
||||
FileRelocationInfo relocation{};
|
||||
Directory* parent;
|
||||
};
|
||||
|
||||
struct DirectoryMetadata {
|
||||
u32_le parent_directory_offset;
|
||||
u32_le next_sibling_offset;
|
||||
u32_le first_child_directory_offset;
|
||||
u32_le first_file_offset;
|
||||
u32_le hash_bucket_next;
|
||||
u32_le name_length;
|
||||
// Followed by a name of the given length (aligned up to 4 bytes)
|
||||
};
|
||||
static_assert(sizeof(DirectoryMetadata) == 0x18, "Size of DirectoryMetadata is not correct");
|
||||
|
||||
struct FileMetadata {
|
||||
u32_le parent_directory_offset;
|
||||
u32_le next_sibling_offset;
|
||||
u64_le file_data_offset;
|
||||
u64_le file_data_length;
|
||||
u32_le hash_bucket_next;
|
||||
u32_le name_length;
|
||||
// Followed by a name of the given length (aligned up to 4 bytes)
|
||||
};
|
||||
static_assert(sizeof(FileMetadata) == 0x20, "Size of FileMetadata is not correct");
|
||||
|
||||
LayeredFS::LayeredFS(std::shared_ptr<RomFSReader> romfs_, std::string patch_path_,
|
||||
std::string patch_ext_path_, bool load_relocations)
|
||||
: romfs(std::move(romfs_)), patch_path(std::move(patch_path_)),
|
||||
patch_ext_path(std::move(patch_ext_path_)) {
|
||||
|
||||
romfs->ReadFile(0, sizeof(header), reinterpret_cast<u8*>(&header));
|
||||
|
||||
ASSERT_MSG(header.header_length == sizeof(header), "Header size is incorrect");
|
||||
|
||||
// TODO: is root always the first directory in table?
|
||||
root.parent = &root;
|
||||
LoadDirectory(root, 0);
|
||||
|
||||
if (load_relocations) {
|
||||
LoadRelocations();
|
||||
LoadExtRelocations();
|
||||
}
|
||||
|
||||
RebuildMetadata();
|
||||
}
|
||||
|
||||
LayeredFS::~LayeredFS() = default;
|
||||
|
||||
void LayeredFS::LoadDirectory(Directory& current, u32 offset) {
|
||||
DirectoryMetadata metadata;
|
||||
romfs->ReadFile(header.directory_metadata_table.offset + offset, sizeof(metadata),
|
||||
reinterpret_cast<u8*>(&metadata));
|
||||
|
||||
current.name = ReadName(header.directory_metadata_table.offset + offset + sizeof(metadata),
|
||||
metadata.name_length);
|
||||
current.path = current.parent->path + current.name + DIR_SEP;
|
||||
directory_path_map.emplace(current.path, ¤t);
|
||||
|
||||
if (metadata.first_file_offset != 0xFFFFFFFF) {
|
||||
LoadFile(current, metadata.first_file_offset);
|
||||
}
|
||||
|
||||
if (metadata.first_child_directory_offset != 0xFFFFFFFF) {
|
||||
auto child = std::make_unique<Directory>();
|
||||
auto& directory = *child;
|
||||
directory.parent = ¤t;
|
||||
current.directories.emplace_back(std::move(child));
|
||||
LoadDirectory(directory, metadata.first_child_directory_offset);
|
||||
}
|
||||
|
||||
if (metadata.next_sibling_offset != 0xFFFFFFFF) {
|
||||
auto sibling = std::make_unique<Directory>();
|
||||
auto& directory = *sibling;
|
||||
directory.parent = current.parent;
|
||||
current.parent->directories.emplace_back(std::move(sibling));
|
||||
LoadDirectory(directory, metadata.next_sibling_offset);
|
||||
}
|
||||
}
|
||||
|
||||
void LayeredFS::LoadFile(Directory& parent, u32 offset) {
|
||||
FileMetadata metadata;
|
||||
romfs->ReadFile(header.file_metadata_table.offset + offset, sizeof(metadata),
|
||||
reinterpret_cast<u8*>(&metadata));
|
||||
|
||||
auto file = std::make_unique<File>();
|
||||
file->name = ReadName(header.file_metadata_table.offset + offset + sizeof(metadata),
|
||||
metadata.name_length);
|
||||
file->path = parent.path + file->name;
|
||||
file->relocation.original_offset = header.file_data_offset + metadata.file_data_offset;
|
||||
file->relocation.size = metadata.file_data_length;
|
||||
file->parent = &parent;
|
||||
|
||||
file_path_map.emplace(file->path, file.get());
|
||||
parent.files.emplace_back(std::move(file));
|
||||
|
||||
if (metadata.next_sibling_offset != 0xFFFFFFFF) {
|
||||
LoadFile(parent, metadata.next_sibling_offset);
|
||||
}
|
||||
}
|
||||
|
||||
std::string LayeredFS::ReadName(u32 offset, u32 name_length) {
|
||||
std::vector<u16_le> buffer(name_length / sizeof(u16_le));
|
||||
romfs->ReadFile(offset, name_length, reinterpret_cast<u8*>(buffer.data()));
|
||||
|
||||
std::u16string name(buffer.size(), 0);
|
||||
std::transform(buffer.begin(), buffer.end(), name.begin(), [](u16_le character) {
|
||||
return static_cast<char16_t>(static_cast<u16>(character));
|
||||
});
|
||||
return Common::UTF16ToUTF8(name);
|
||||
}
|
||||
|
||||
void LayeredFS::LoadRelocations() {
|
||||
if (!FileUtil::Exists(patch_path)) {
|
||||
return;
|
||||
}
|
||||
|
||||
const FileUtil::DirectoryEntryCallable callback = [this,
|
||||
&callback](u64* /*num_entries_out*/,
|
||||
const std::string& directory,
|
||||
const std::string& virtual_name) {
|
||||
auto* parent = directory_path_map.at(directory.substr(patch_path.size() - 1));
|
||||
|
||||
if (FileUtil::IsDirectory(directory + virtual_name + DIR_SEP)) {
|
||||
const auto path = (directory + virtual_name + DIR_SEP).substr(patch_path.size() - 1);
|
||||
if (!directory_path_map.count(path)) { // Add this directory
|
||||
auto directory = std::make_unique<Directory>();
|
||||
directory->name = virtual_name;
|
||||
directory->path = path;
|
||||
directory->parent = parent;
|
||||
directory_path_map.emplace(path, directory.get());
|
||||
parent->directories.emplace_back(std::move(directory));
|
||||
LOG_INFO(Service_FS, "LayeredFS created directory {}", path);
|
||||
}
|
||||
return FileUtil::ForeachDirectoryEntry(nullptr, directory + virtual_name + DIR_SEP,
|
||||
callback);
|
||||
}
|
||||
|
||||
const auto path = (directory + virtual_name).substr(patch_path.size() - 1);
|
||||
if (!file_path_map.count(path)) { // Newly created file
|
||||
auto file = std::make_unique<File>();
|
||||
file->name = virtual_name;
|
||||
file->path = path;
|
||||
file->parent = parent;
|
||||
file_path_map.emplace(path, file.get());
|
||||
parent->files.emplace_back(std::move(file));
|
||||
LOG_INFO(Service_FS, "LayeredFS created file {}", path);
|
||||
}
|
||||
|
||||
auto* file = file_path_map.at(path);
|
||||
file->relocation.type = 1;
|
||||
file->relocation.replace_file_path = directory + virtual_name;
|
||||
file->relocation.size = FileUtil::GetSize(directory + virtual_name);
|
||||
LOG_INFO(Service_FS, "LayeredFS replacement file in use for {}", path);
|
||||
return true;
|
||||
};
|
||||
|
||||
FileUtil::ForeachDirectoryEntry(nullptr, patch_path, callback);
|
||||
}
|
||||
|
||||
void LayeredFS::LoadExtRelocations() {
|
||||
if (!FileUtil::Exists(patch_ext_path)) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (patch_ext_path.back() == '/' || patch_ext_path.back() == '\\') {
|
||||
// ScanDirectoryTree expects a path without trailing '/'
|
||||
patch_ext_path.erase(patch_ext_path.size() - 1, 1);
|
||||
}
|
||||
|
||||
FileUtil::FSTEntry result;
|
||||
FileUtil::ScanDirectoryTree(patch_ext_path, result, 256);
|
||||
|
||||
for (const auto& entry : result.children) {
|
||||
if (FileUtil::IsDirectory(entry.physicalName)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const auto path = entry.physicalName.substr(patch_ext_path.size());
|
||||
if (path.size() >= 5 && path.substr(path.size() - 5) == ".stub") {
|
||||
// Remove the corresponding file if it exists
|
||||
const auto file_path = path.substr(0, path.size() - 5);
|
||||
if (file_path_map.count(file_path)) {
|
||||
auto& file = *file_path_map[file_path];
|
||||
file.relocation.type = 3;
|
||||
file.relocation.size = 0;
|
||||
file_path_map.erase(file_path);
|
||||
LOG_INFO(Service_FS, "LayeredFS removed file {}", file_path);
|
||||
} else {
|
||||
LOG_WARNING(Service_FS, "LayeredFS file for stub {} not found", path);
|
||||
}
|
||||
} else if (path.size() >= 4) {
|
||||
const auto extension = path.substr(path.size() - 4);
|
||||
if (extension != ".ips" && extension != ".bps") {
|
||||
LOG_WARNING(Service_FS, "LayeredFS unknown ext file {}", path);
|
||||
}
|
||||
|
||||
const auto file_path = path.substr(0, path.size() - 4);
|
||||
if (!file_path_map.count(file_path)) {
|
||||
LOG_WARNING(Service_FS, "LayeredFS original file for patch {} not found", path);
|
||||
continue;
|
||||
}
|
||||
|
||||
FileUtil::IOFile patch_file(entry.physicalName, "rb");
|
||||
if (!patch_file) {
|
||||
LOG_ERROR(Service_FS, "LayeredFS Could not open file {}", entry.physicalName);
|
||||
continue;
|
||||
}
|
||||
|
||||
const auto size = patch_file.GetSize();
|
||||
std::vector<u8> patch(size);
|
||||
if (patch_file.ReadBytes(patch.data(), size) != size) {
|
||||
LOG_ERROR(Service_FS, "LayeredFS Could not read file {}", entry.physicalName);
|
||||
continue;
|
||||
}
|
||||
|
||||
auto& file = *file_path_map[file_path];
|
||||
std::vector<u8> buffer(file.relocation.size); // Original size
|
||||
romfs->ReadFile(file.relocation.original_offset, buffer.size(), buffer.data());
|
||||
|
||||
bool ret = false;
|
||||
if (extension == ".ips") {
|
||||
ret = Patch::ApplyIpsPatch(patch, buffer);
|
||||
} else {
|
||||
ret = Patch::ApplyBpsPatch(patch, buffer);
|
||||
}
|
||||
|
||||
if (ret) {
|
||||
LOG_INFO(Service_FS, "LayeredFS patched file {}", file_path);
|
||||
|
||||
file.relocation.type = 2;
|
||||
file.relocation.size = buffer.size();
|
||||
file.relocation.patched_file = std::move(buffer);
|
||||
} else {
|
||||
LOG_ERROR(Service_FS, "LayeredFS failed to patch file {}", file_path);
|
||||
}
|
||||
} else {
|
||||
LOG_WARNING(Service_FS, "LayeredFS unknown ext file {}", path);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
std::size_t GetNameSize(const std::string& name) {
|
||||
std::u16string u16name = Common::UTF8ToUTF16(name);
|
||||
return Common::AlignUp(u16name.size() * 2, 4);
|
||||
}
|
||||
|
||||
void LayeredFS::PrepareBuildDirectory(Directory& current) {
|
||||
directory_metadata_offset_map.emplace(¤t, current_directory_offset);
|
||||
directory_list.emplace_back(¤t);
|
||||
current_directory_offset += sizeof(DirectoryMetadata) + GetNameSize(current.name);
|
||||
}
|
||||
|
||||
void LayeredFS::PrepareBuildFile(File& current) {
|
||||
if (current.relocation.type == 3) { // Deleted files are not counted
|
||||
return;
|
||||
}
|
||||
file_metadata_offset_map.emplace(¤t, current_file_offset);
|
||||
file_list.emplace_back(¤t);
|
||||
current_file_offset += sizeof(FileMetadata) + GetNameSize(current.name);
|
||||
}
|
||||
|
||||
void LayeredFS::PrepareBuild(Directory& current) {
|
||||
for (const auto& child : current.files) {
|
||||
PrepareBuildFile(*child);
|
||||
}
|
||||
|
||||
for (const auto& child : current.directories) {
|
||||
PrepareBuildDirectory(*child);
|
||||
}
|
||||
|
||||
for (const auto& child : current.directories) {
|
||||
PrepareBuild(*child);
|
||||
}
|
||||
}
|
||||
|
||||
// Implementation from 3dbrew
|
||||
u32 CalcHash(const std::string& name, u32 parent_offset) {
|
||||
u32 hash = parent_offset ^ 123456789;
|
||||
std::u16string u16name = Common::UTF8ToUTF16(name);
|
||||
for (char16_t c : u16name) {
|
||||
hash = (hash >> 5) | (hash << 27);
|
||||
hash ^= static_cast<u16>(c);
|
||||
}
|
||||
return hash;
|
||||
}
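CalcHash mixes the parent directory's metadata offset with the UTF-16 code units of the name (rotate right by 5, XOR each unit), and the rebuild code later reduces it modulo the hash table size to pick a bucket. A small standalone sketch of that bucket selection; the name is ASCII so the UTF-16 conversion is trivial, and the table size here is just an example value (the real one comes from GetHashTableSize(), shown further down):

#include <cstdint>
#include <cstdio>
#include <string>

// Same rotate-and-xor scheme as CalcHash above, applied to an ASCII name for simplicity.
std::uint32_t ToyCalcHash(const std::string& name, std::uint32_t parent_offset) {
    std::uint32_t hash = parent_offset ^ 123456789;
    for (char c : name) {
        hash = (hash >> 5) | (hash << 27); // rotate right by 5
        hash ^= static_cast<std::uint16_t>(c);
    }
    return hash;
}

int main() {
    const std::uint32_t parent_offset = 0; // e.g. a file whose parent metadata sits at offset 0
    const std::size_t table_size = 23;     // example size only
    const std::uint32_t hash = ToyCalcHash("boot.firm", parent_offset);
    std::printf("bucket = %zu\n", static_cast<std::size_t>(hash % table_size));
}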
|
||||
|
||||
std::size_t WriteName(u8* dest, std::u16string name) {
|
||||
const auto buffer_size = Common::AlignUp(name.size() * 2, 4);
|
||||
std::vector<u16_le> buffer(buffer_size / 2);
|
||||
std::transform(name.begin(), name.end(), buffer.begin(), [](char16_t character) {
|
||||
return static_cast<u16_le>(static_cast<u16>(character));
|
||||
});
|
||||
std::memcpy(dest, buffer.data(), buffer_size);
|
||||
|
||||
return buffer_size;
|
||||
}
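Names are stored as UTF-16 and padded to a 4-byte boundary, so GetNameSize() and WriteName() always agree on how many bytes a name occupies in the metadata tables. A couple of worked values under that rule (helper name below is hypothetical):

#include <cstddef>

// AlignUp(2 * name_length_in_code_units, 4), mirroring GetNameSize()/WriteName().
constexpr std::size_t NameBytes(std::size_t utf16_units) {
    return (utf16_units * 2 + 3) & ~static_cast<std::size_t>(3);
}

static_assert(NameBytes(1) == 4, "\"a\" is 2 bytes of UTF-16, padded to 4");
static_assert(NameBytes(4) == 8, "\"data\" is already 4-byte aligned");
static_assert(NameBytes(9) == 20, "\"romfs.bin\" has 9 units -> 18 bytes -> 20");

int main() {
    return 0;
}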
|
||||
|
||||
void LayeredFS::BuildDirectories() {
|
||||
directory_metadata_table.resize(current_directory_offset, 0xFF);
|
||||
|
||||
std::size_t written = 0;
|
||||
for (const auto& directory : directory_list) {
|
||||
DirectoryMetadata metadata;
|
||||
std::memset(&metadata, 0xFF, sizeof(metadata));
|
||||
metadata.parent_directory_offset = directory_metadata_offset_map.at(directory->parent);
|
||||
|
||||
if (directory->parent != directory) {
|
||||
bool flag = false;
|
||||
for (const auto& sibling : directory->parent->directories) {
|
||||
if (flag) {
|
||||
metadata.next_sibling_offset = directory_metadata_offset_map.at(sibling.get());
|
||||
break;
|
||||
} else if (sibling.get() == directory) {
|
||||
flag = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!directory->directories.empty()) {
|
||||
metadata.first_child_directory_offset =
|
||||
directory_metadata_offset_map.at(directory->directories.front().get());
|
||||
}
|
||||
|
||||
if (!directory->files.empty()) {
|
||||
metadata.first_file_offset =
|
||||
file_metadata_offset_map.at(directory->files.front().get());
|
||||
}
|
||||
|
||||
const auto bucket = CalcHash(directory->name, metadata.parent_directory_offset) %
|
||||
directory_hash_table.size();
|
||||
metadata.hash_bucket_next = directory_hash_table[bucket];
|
||||
directory_hash_table[bucket] = directory_metadata_offset_map.at(directory);
|
||||
|
||||
// Write metadata and name
|
||||
std::u16string u16name = Common::UTF8ToUTF16(directory->name);
|
||||
metadata.name_length = u16name.size() * 2;
|
||||
|
||||
std::memcpy(directory_metadata_table.data() + written, &metadata, sizeof(metadata));
|
||||
written += sizeof(metadata);
|
||||
|
||||
written += WriteName(directory_metadata_table.data() + written, u16name);
|
||||
}
|
||||
|
||||
ASSERT_MSG(written == directory_metadata_table.size(),
|
||||
"Calculated size for directory metadata table is wrong");
|
||||
}
|
||||
|
||||
void LayeredFS::BuildFiles() {
|
||||
file_metadata_table.resize(current_file_offset, 0xFF);
|
||||
|
||||
std::size_t written = 0;
|
||||
for (const auto& file : file_list) {
|
||||
FileMetadata metadata;
|
||||
std::memset(&metadata, 0xFF, sizeof(metadata));
|
||||
|
||||
metadata.parent_directory_offset = directory_metadata_offset_map.at(file->parent);
|
||||
|
||||
bool flag = false;
|
||||
for (const auto& sibling : file->parent->files) {
|
||||
if (sibling->relocation.type == 3) { // removed file
|
||||
continue;
|
||||
}
|
||||
if (flag) {
|
||||
metadata.next_sibling_offset = file_metadata_offset_map.at(sibling.get());
|
||||
break;
|
||||
} else if (sibling.get() == file) {
|
||||
flag = true;
|
||||
}
|
||||
}
|
||||
|
||||
metadata.file_data_offset = current_data_offset;
|
||||
metadata.file_data_length = file->relocation.size;
|
||||
current_data_offset += Common::AlignUp(metadata.file_data_length, 16);
|
||||
if (metadata.file_data_length != 0) {
|
||||
data_offset_map.emplace(metadata.file_data_offset, file);
|
||||
}
|
||||
|
||||
const auto bucket =
|
||||
CalcHash(file->name, metadata.parent_directory_offset) % file_hash_table.size();
|
||||
metadata.hash_bucket_next = file_hash_table[bucket];
|
||||
file_hash_table[bucket] = file_metadata_offset_map.at(file);
|
||||
|
||||
// Write metadata and name
|
||||
std::u16string u16name = Common::UTF8ToUTF16(file->name);
|
||||
metadata.name_length = u16name.size() * 2;
|
||||
|
||||
std::memcpy(file_metadata_table.data() + written, &metadata, sizeof(metadata));
|
||||
written += sizeof(metadata);
|
||||
|
||||
written += WriteName(file_metadata_table.data() + written, u16name);
|
||||
}
|
||||
|
||||
ASSERT_MSG(written == file_metadata_table.size(),
|
||||
"Calculated size for file metadata table is wrong");
|
||||
}
|
||||
|
||||
// Implementation from 3dbrew
|
||||
std::size_t GetHashTableSize(std::size_t entry_count) {
|
||||
if (entry_count < 3) {
|
||||
return 3;
|
||||
} else if (entry_count < 19) {
|
||||
return entry_count | 1;
|
||||
} else {
|
||||
std::size_t count = entry_count;
|
||||
while (count % 2 == 0 || count % 3 == 0 || count % 5 == 0 || count % 7 == 0 ||
|
||||
count % 11 == 0 || count % 13 == 0 || count % 17 == 0) {
|
||||
count++;
|
||||
}
|
||||
return count;
|
||||
}
|
||||
}
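GetHashTableSize keeps small tables at a floor of 3, forces odd sizes for up to 18 entries, and otherwise bumps the count upward until it shares no factor with the small primes 2 through 17. A few values checked against those branches (the helper below is a restated copy for a standalone sanity check, not a new API):

#include <cassert>
#include <cstddef>

std::size_t ToyHashTableSize(std::size_t entry_count) {
    if (entry_count < 3) {
        return 3;
    } else if (entry_count < 19) {
        return entry_count | 1;
    }
    std::size_t count = entry_count;
    while (count % 2 == 0 || count % 3 == 0 || count % 5 == 0 || count % 7 == 0 ||
           count % 11 == 0 || count % 13 == 0 || count % 17 == 0) {
        count++;
    }
    return count;
}

int main() {
    assert(ToyHashTableSize(0) == 3);   // floor for tiny tables
    assert(ToyHashTableSize(10) == 11); // small counts are just made odd
    assert(ToyHashTableSize(18) == 19);
    assert(ToyHashTableSize(20) == 23); // 20, 21, 22 all share a factor with 2..17
    assert(ToyHashTableSize(100) == 101);
}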
|
||||
|
||||
void LayeredFS::RebuildMetadata() {
|
||||
PrepareBuildDirectory(root);
|
||||
PrepareBuild(root);
|
||||
|
||||
directory_hash_table.resize(GetHashTableSize(directory_list.size()), 0xFFFFFFFF);
|
||||
file_hash_table.resize(GetHashTableSize(file_list.size()), 0xFFFFFFFF);
|
||||
|
||||
BuildDirectories();
|
||||
BuildFiles();
|
||||
|
||||
// Create header
|
||||
RomFSHeader header;
|
||||
header.header_length = sizeof(header);
|
||||
header.directory_hash_table = {
|
||||
/*offset*/ sizeof(header),
|
||||
/*length*/ static_cast<u32_le>(directory_hash_table.size() * sizeof(u32_le))};
|
||||
header.directory_metadata_table = {
|
||||
/*offset*/
|
||||
header.directory_hash_table.offset + header.directory_hash_table.length,
|
||||
/*length*/ static_cast<u32_le>(directory_metadata_table.size())};
|
||||
header.file_hash_table = {
|
||||
/*offset*/
|
||||
header.directory_metadata_table.offset + header.directory_metadata_table.length,
|
||||
/*length*/ static_cast<u32_le>(file_hash_table.size() * sizeof(u32_le))};
|
||||
header.file_metadata_table = {/*offset*/ header.file_hash_table.offset +
|
||||
header.file_hash_table.length,
|
||||
/*length*/ static_cast<u32_le>(file_metadata_table.size())};
|
||||
header.file_data_offset =
|
||||
Common::AlignUp(header.file_metadata_table.offset + header.file_metadata_table.length, 16);
|
||||
|
||||
// Write hash table and metadata table
|
||||
metadata.resize(header.file_data_offset);
|
||||
std::memcpy(metadata.data(), &header, header.header_length);
|
||||
std::memcpy(metadata.data() + header.directory_hash_table.offset, directory_hash_table.data(),
|
||||
header.directory_hash_table.length);
|
||||
std::memcpy(metadata.data() + header.directory_metadata_table.offset,
|
||||
directory_metadata_table.data(), header.directory_metadata_table.length);
|
||||
std::memcpy(metadata.data() + header.file_hash_table.offset, file_hash_table.data(),
|
||||
header.file_hash_table.length);
|
||||
std::memcpy(metadata.data() + header.file_metadata_table.offset, file_metadata_table.data(),
|
||||
header.file_metadata_table.length);
|
||||
}
|
||||
|
||||
std::size_t LayeredFS::GetSize() const {
|
||||
return metadata.size() + current_data_offset;
|
||||
}
|
||||
|
||||
std::size_t LayeredFS::ReadFile(std::size_t offset, std::size_t length, u8* buffer) {
|
||||
ASSERT_MSG(offset + length <= GetSize(), "Out of bounds");
|
||||
|
||||
std::size_t read_size = 0;
|
||||
if (offset < metadata.size()) {
|
||||
// First read the metadata
|
||||
const auto to_read = std::min(metadata.size() - offset, length);
|
||||
std::memcpy(buffer, metadata.data() + offset, to_read);
|
||||
read_size += to_read;
|
||||
offset = 0;
|
||||
} else {
|
||||
offset -= metadata.size();
|
||||
}
|
||||
|
||||
// Read files
|
||||
auto current = (--data_offset_map.upper_bound(offset));
|
||||
while (read_size < length) {
|
||||
const auto relative_offset = offset - current->first;
|
||||
std::size_t to_read{};
|
||||
if (current->second->relocation.size > relative_offset) {
|
||||
to_read = std::min<std::size_t>(current->second->relocation.size - relative_offset,
|
||||
length - read_size);
|
||||
}
|
||||
const auto alignment =
|
||||
std::min<std::size_t>(Common::AlignUp(current->second->relocation.size, 16) -
|
||||
relative_offset,
|
||||
length - read_size) -
|
||||
to_read;
|
||||
|
||||
// Read the file in different ways depending on relocation type
|
||||
auto& relocation = current->second->relocation;
|
||||
if (relocation.type == 0) { // none
|
||||
romfs->ReadFile(relocation.original_offset + relative_offset, to_read,
|
||||
buffer + read_size);
|
||||
} else if (relocation.type == 1) { // replace
|
||||
FileUtil::IOFile replace_file(relocation.replace_file_path, "rb");
|
||||
if (replace_file) {
|
||||
replace_file.Seek(relative_offset, SEEK_SET);
|
||||
replace_file.ReadBytes(buffer + read_size, to_read);
|
||||
} else {
|
||||
LOG_ERROR(Service_FS, "Could not open replacement file for {}",
|
||||
current->second->path);
|
||||
}
|
||||
} else if (relocation.type == 2) { // patch
|
||||
std::memcpy(buffer + read_size, relocation.patched_file.data() + relative_offset,
|
||||
to_read);
|
||||
} else {
|
||||
UNREACHABLE();
|
||||
}
|
||||
|
||||
std::memset(buffer + read_size + to_read, 0, alignment);
|
||||
|
||||
read_size += to_read + alignment;
|
||||
offset += to_read + alignment;
|
||||
current++;
|
||||
}
|
||||
|
||||
return read_size;
|
||||
}
|
||||
|
||||
bool LayeredFS::ExtractDirectory(Directory& current, const std::string& target_path) {
|
||||
if (!FileUtil::CreateFullPath(target_path + current.path)) {
|
||||
LOG_ERROR(Service_FS, "Could not create path {}", target_path + current.path);
|
||||
return false;
|
||||
}
|
||||
|
||||
constexpr std::size_t BufferSize = 0x10000;
|
||||
std::array<u8, BufferSize> buffer;
|
||||
for (const auto& file : current.files) {
|
||||
// Extract file
|
||||
const auto path = target_path + file->path;
|
||||
LOG_INFO(Service_FS, "Extracting {} to {}", file->path, path);
|
||||
|
||||
FileUtil::IOFile target_file(path, "wb");
|
||||
if (!target_file) {
|
||||
LOG_ERROR(Service_FS, "Could not open file {}", path);
|
||||
return false;
|
||||
}
|
||||
|
||||
std::size_t written = 0;
|
||||
while (written < file->relocation.size) {
|
||||
const auto to_read =
|
||||
std::min<std::size_t>(buffer.size(), file->relocation.size - written);
|
||||
if (romfs->ReadFile(file->relocation.original_offset + written, to_read,
|
||||
buffer.data()) != to_read) {
|
||||
LOG_ERROR(Service_FS, "Could not read from RomFS");
|
||||
return false;
|
||||
}
|
||||
|
||||
if (target_file.WriteBytes(buffer.data(), to_read) != to_read) {
|
||||
LOG_ERROR(Service_FS, "Could not write to file {}", path);
|
||||
return false;
|
||||
}
|
||||
|
||||
written += to_read;
|
||||
}
|
||||
}
|
||||
|
||||
for (const auto& directory : current.directories) {
|
||||
if (!ExtractDirectory(*directory, target_path)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool LayeredFS::DumpRomFS(const std::string& target_path) {
|
||||
std::string path = target_path;
|
||||
if (path.back() == '/' || path.back() == '\\') {
|
||||
path.erase(path.size() - 1, 1);
|
||||
}
|
||||
|
||||
return ExtractDirectory(root, path);
|
||||
}
|
||||
|
||||
} // namespace FileSys
|
|
@ -0,0 +1,123 @@
|
|||
// Copyright 2020 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <map>
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <unordered_map>
|
||||
#include <vector>
|
||||
#include "common/common_types.h"
|
||||
#include "common/swap.h"
|
||||
#include "core/file_sys/romfs_reader.h"
|
||||
|
||||
namespace FileSys {
|
||||
|
||||
struct RomFSHeader {
|
||||
struct Descriptor {
|
||||
u32_le offset;
|
||||
u32_le length;
|
||||
};
|
||||
u32_le header_length;
|
||||
Descriptor directory_hash_table;
|
||||
Descriptor directory_metadata_table;
|
||||
Descriptor file_hash_table;
|
||||
Descriptor file_metadata_table;
|
||||
u32_le file_data_offset;
|
||||
};
|
||||
static_assert(sizeof(RomFSHeader) == 0x28, "Size of RomFSHeader is not correct");
|
||||
|
||||
/**
|
||||
* LayeredFS implementation. This basically adds a layer to another RomFSReader.
|
||||
*
|
||||
* patch_path: Path for RomFS replacements. Files present in this path replace or create
|
||||
* corresponding files in RomFS.
|
||||
* patch_ext_path: Path for RomFS extensions. Files present in this path:
|
||||
* - When with an extension of ".stub", remove the corresponding file in the RomFS.
|
||||
* - When with an extension of ".ips" or ".bps", patch the file in the RomFS.
|
||||
*/
|
||||
class LayeredFS : public RomFSReader {
|
||||
public:
|
||||
explicit LayeredFS(std::shared_ptr<RomFSReader> romfs, std::string patch_path,
|
||||
std::string patch_ext_path, bool load_relocations = true);
|
||||
~LayeredFS() override;
|
||||
|
||||
std::size_t GetSize() const override;
|
||||
std::size_t ReadFile(std::size_t offset, std::size_t length, u8* buffer) override;
|
||||
|
||||
bool DumpRomFS(const std::string& target_path);
|
||||
|
||||
private:
|
||||
struct File;
|
||||
struct Directory {
|
||||
std::string name;
|
||||
std::string path; // with trailing '/'
|
||||
std::vector<std::unique_ptr<File>> files;
|
||||
std::vector<std::unique_ptr<Directory>> directories;
|
||||
Directory* parent;
|
||||
};
|
||||
|
||||
std::string ReadName(u32 offset, u32 name_length);
|
||||
|
||||
// Loads the current directory, then its children, and then its siblings.
|
||||
void LoadDirectory(Directory& current, u32 offset);
|
||||
|
||||
// Load the file at offset, and then its siblings.
|
||||
void LoadFile(Directory& parent, u32 offset);
|
||||
|
||||
// Load replace/create relocations
|
||||
void LoadRelocations();
|
||||
|
||||
// Load patch/remove relocations
|
||||
void LoadExtRelocations();
|
||||
|
||||
// Calculate the offset of a single directory add it to the map and list of directories
|
||||
void PrepareBuildDirectory(Directory& current);
|
||||
|
||||
// Calculate the offset of a single file add it to the map and list of files
|
||||
void PrepareBuildFile(File& current);
|
||||
|
||||
// Recursively generate a sequence of files and directories and their offsets for all
|
||||
// children of current. (The current directory itself is not handled.)
|
||||
void PrepareBuild(Directory& current);
|
||||
|
||||
void BuildDirectories();
|
||||
void BuildFiles();
|
||||
|
||||
// Recursively extract a directory and all its contents to target_path
|
||||
// target_path should be without trailing '/'.
|
||||
bool ExtractDirectory(Directory& current, const std::string& target_path);
|
||||
|
||||
void RebuildMetadata();
|
||||
|
||||
std::shared_ptr<RomFSReader> romfs;
|
||||
std::string patch_path;
|
||||
std::string patch_ext_path;
|
||||
|
||||
RomFSHeader header;
|
||||
Directory root;
|
||||
std::unordered_map<std::string, File*> file_path_map;
|
||||
std::unordered_map<std::string, Directory*> directory_path_map;
|
||||
std::map<u64, File*> data_offset_map; // assigned data offset -> file
|
||||
std::vector<u8> metadata; // Includes header, hash table and metadata
|
||||
|
||||
// Used for rebuilding header
|
||||
std::vector<u32_le> directory_hash_table;
|
||||
std::vector<u32_le> file_hash_table;
|
||||
|
||||
std::unordered_map<Directory*, u32>
|
||||
directory_metadata_offset_map; // directory -> metadata offset
|
||||
std::vector<Directory*> directory_list; // sequence of directories to be written to metadata
|
||||
u64 current_directory_offset{}; // current directory metadata offset
|
||||
std::vector<u8> directory_metadata_table; // rebuilt directory metadata table
|
||||
|
||||
std::unordered_map<File*, u32> file_metadata_offset_map; // file -> metadata offset
|
||||
std::vector<File*> file_list; // sequence of files to be written to metadata
|
||||
u64 current_file_offset{}; // current file metadata offset
|
||||
std::vector<u8> file_metadata_table; // rebuilt file metadata table
|
||||
u64 current_data_offset{}; // current assigned data offset
|
||||
};
|
||||
|
||||
} // namespace FileSys
|
|
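The header above is the whole of the new LayeredFS interface. As a rough usage sketch (not part of this commit; the file name, mod paths and includes are made up for illustration), a frontend could wrap a dumped RomFS like this:

```cpp
// Illustrative only: wrapping a RomFS dump with LayeredFS via the interface above.
#include "common/file_util.h"
#include "core/file_sys/layered_fs.h"
#include "core/file_sys/romfs_reader.h"

std::shared_ptr<FileSys::RomFSReader> OpenPatchedRomFS() {
    FileUtil::IOFile romfs_file("game.romfs", "rb"); // hypothetical standalone dump
    const std::size_t size = romfs_file.GetSize();
    auto direct =
        std::make_shared<FileSys::DirectRomFSReader>(std::move(romfs_file), 0, size);
    // First path: replacement files; second path: .ips/.bps/.stub entries,
    // exactly as the doc comment above describes.
    return std::make_shared<FileSys::LayeredFS>(std::move(direct), "mods/romfs/",
                                                "mods/romfs_ext/");
}
```

Because LayeredFS itself implements RomFSReader, the wrapped reader is a drop-in replacement wherever a plain RomFS reader was used before.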
@@ -11,6 +11,7 @@
#include "common/common_types.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/file_sys/layered_fs.h"
#include "core/file_sys/ncch_container.h"
#include "core/file_sys/patch.h"
#include "core/file_sys/seed_db.h"

@@ -25,6 +26,14 @@ namespace FileSys {
static const int kMaxSections = 8; ///< Maximum number of sections (files) in an ExeFs
static const int kBlockSize = 0x200; ///< Size of ExeFS blocks (in bytes)

u64 GetModId(u64 program_id) {
constexpr u64 UPDATE_MASK = 0x0000000e'00000000;
if ((program_id & 0x000000ff'00000000) == UPDATE_MASK) { // Apply the mods to updates
return program_id & ~UPDATE_MASK;
}
return program_id;
}

/**
 * Get the decompressed size of an LZSS compressed ExeFS file
 * @param buffer Buffer of compressed file
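A quick worked example of the GetModId() masking above (the title IDs are hypothetical):

```cpp
// The update title 0004000E00055D00 has bits 32-39 equal to 0x0E, so GetModId()
// strips them and returns the base title 0004000000055D00 - the update and the
// base game therefore share one mods folder.
static_assert((0x0004000E00055D00 & 0x000000ff'00000000) == 0x0000000e'00000000);
static_assert((0x0004000E00055D00 & ~0x0000000e'00000000) == 0x0004000000055D00);
```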
@@ -303,8 +312,22 @@ Loader::ResultStatus NCCHContainer::Load() {
}
}

FileUtil::IOFile exheader_override_file{filepath + ".exheader", "rb"};
const bool has_exheader_override = read_exheader(exheader_override_file);
const auto mods_path =
fmt::format("{}mods/{:016X}/", FileUtil::GetUserPath(FileUtil::UserPath::LoadDir),
GetModId(ncch_header.program_id));
const std::array<std::string, 2> exheader_override_paths{{
mods_path + "exheader.bin",
filepath + ".exheader",
}};

bool has_exheader_override = false;
for (const auto& path : exheader_override_paths) {
FileUtil::IOFile exheader_override_file{path, "rb"};
if (read_exheader(exheader_override_file)) {
has_exheader_override = true;
break;
}
}
if (has_exheader_override) {
if (exheader_header.system_info.jump_id !=
exheader_header.arm11_system_local_caps.program_id) {
@@ -512,7 +535,15 @@ Loader::ResultStatus NCCHContainer::ApplyCodePatch(std::vector<u8>& code) const
std::string path;
bool (*patch_fn)(const std::vector<u8>& patch, std::vector<u8>& code);
};
const std::array<PatchLocation, 2> patch_paths{{

const auto mods_path =
fmt::format("{}mods/{:016X}/", FileUtil::GetUserPath(FileUtil::UserPath::LoadDir),
GetModId(ncch_header.program_id));
const std::array<PatchLocation, 6> patch_paths{{
{mods_path + "exefs/code.ips", Patch::ApplyIpsPatch},
{mods_path + "exefs/code.bps", Patch::ApplyBpsPatch},
{mods_path + "code.ips", Patch::ApplyIpsPatch},
{mods_path + "code.bps", Patch::ApplyBpsPatch},
{filepath + ".exefsdir/code.ips", Patch::ApplyIpsPatch},
{filepath + ".exefsdir/code.bps", Patch::ApplyBpsPatch},
}};
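Taken together with GetModId() and the ExHeader override added above, the new search paths imply a per-title mod layout roughly like the following (the title ID is hypothetical, and only the paths actually listed in `patch_paths`, `exheader_override_paths` and the RomFS lookups are real; the paths are presumably tried in the order listed):

```
<user dir>/load/mods/0004000000055D00/
├── exheader.bin            decrypted ExHeader replacement
├── code.ips / code.bps     patch for the decompressed .code section
├── exefs/code.ips|.bps     same patch, alternative location
├── romfs/                  LayeredFS file replacements
└── romfs_ext/              LayeredFS .ips/.bps/.stub entries
```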
@@ -551,8 +582,17 @@ Loader::ResultStatus NCCHContainer::LoadOverrideExeFSSection(const char* name,
else
return Loader::ResultStatus::Error;

std::string section_override = filepath + ".exefsdir/" + override_name;
FileUtil::IOFile section_file(section_override, "rb");
const auto mods_path =
fmt::format("{}mods/{:016X}/", FileUtil::GetUserPath(FileUtil::UserPath::LoadDir),
GetModId(ncch_header.program_id));
const std::array<std::string, 3> override_paths{{
mods_path + "exefs/" + override_name,
mods_path + override_name,
filepath + ".exefsdir/" + override_name,
}};

for (const auto& path : override_paths) {
FileUtil::IOFile section_file(path, "rb");

if (section_file.IsOpen()) {
auto section_size = section_file.GetSize();
@@ -560,14 +600,16 @@ Loader::ResultStatus NCCHContainer::LoadOverrideExeFSSection(const char* name,

section_file.Seek(0, SEEK_SET);
if (section_file.ReadBytes(&buffer[0], section_size) == section_size) {
LOG_WARNING(Service_FS, "File {} overriding built-in ExeFS file", section_override);
LOG_WARNING(Service_FS, "File {} overriding built-in ExeFS file", path);
return Loader::ResultStatus::Success;
}
}
}
return Loader::ResultStatus::ErrorNotUsed;
}

Loader::ResultStatus NCCHContainer::ReadRomFS(std::shared_ptr<RomFSReader>& romfs_file) {
Loader::ResultStatus NCCHContainer::ReadRomFS(std::shared_ptr<RomFSReader>& romfs_file,
bool use_layered_fs) {
Loader::ResultStatus result = Load();
if (result != Loader::ResultStatus::Success)
return result;
@@ -597,14 +639,43 @@ Loader::ResultStatus NCCHContainer::ReadRomFS(std::shared_ptr<RomFSReader>& romf
if (!romfs_file_inner.IsOpen())
return Loader::ResultStatus::Error;

std::shared_ptr<RomFSReader> direct_romfs;
if (is_encrypted) {
romfs_file = std::make_shared<RomFSReader>(std::move(romfs_file_inner), romfs_offset,
direct_romfs =
std::make_shared<DirectRomFSReader>(std::move(romfs_file_inner), romfs_offset,
romfs_size, secondary_key, romfs_ctr, 0x1000);
} else {
romfs_file =
std::make_shared<RomFSReader>(std::move(romfs_file_inner), romfs_offset, romfs_size);
direct_romfs = std::make_shared<DirectRomFSReader>(std::move(romfs_file_inner),
romfs_offset, romfs_size);
}

const auto path =
fmt::format("{}mods/{:016X}/", FileUtil::GetUserPath(FileUtil::UserPath::LoadDir),
GetModId(ncch_header.program_id));
if (use_layered_fs &&
(FileUtil::Exists(path + "romfs/") || FileUtil::Exists(path + "romfs_ext/"))) {

romfs_file = std::make_shared<LayeredFS>(std::move(direct_romfs), path + "romfs/",
path + "romfs_ext/");
} else {
romfs_file = std::move(direct_romfs);
}

return Loader::ResultStatus::Success;
}

Loader::ResultStatus NCCHContainer::DumpRomFS(const std::string& target_path) {
std::shared_ptr<RomFSReader> direct_romfs;
Loader::ResultStatus result = ReadRomFS(direct_romfs, false);
if (result != Loader::ResultStatus::Success)
return result;

std::shared_ptr<LayeredFS> layered_fs =
std::make_shared<LayeredFS>(std::move(direct_romfs), "", "", false);

if (!layered_fs->DumpRomFS(target_path)) {
return Loader::ResultStatus::Error;
}
return Loader::ResultStatus::Success;
}

@@ -614,8 +685,9 @@ Loader::ResultStatus NCCHContainer::ReadOverrideRomFS(std::shared_ptr<RomFSReade
if (FileUtil::Exists(split_filepath)) {
FileUtil::IOFile romfs_file_inner(split_filepath, "rb");
if (romfs_file_inner.IsOpen()) {
LOG_WARNING(Service_FS, "File {} overriding built-in RomFS", split_filepath);
romfs_file = std::make_shared<RomFSReader>(std::move(romfs_file_inner), 0,
LOG_WARNING(Service_FS, "File {} overriding built-in RomFS; LayeredFS not enabled",
split_filepath);
romfs_file = std::make_shared<DirectRomFSReader>(std::move(romfs_file_inner), 0,
romfs_file_inner.GetSize());
return Loader::ResultStatus::Success;
}
@@ -149,7 +149,8 @@ struct ExHeader_StorageInfo {
struct ExHeader_ARM11_SystemLocalCaps {
u64_le program_id;
u32_le core_version;
u8 reserved_flags[2];
u8 reserved_flag;
u8 n3ds_mode;
union {
u8 flags0;
BitField<0, 2, u8> ideal_processor;
@@ -247,7 +248,15 @@ public:
 * @param size The size of the romfs
 * @return ResultStatus result of function
 */
Loader::ResultStatus ReadRomFS(std::shared_ptr<RomFSReader>& romfs_file);
Loader::ResultStatus ReadRomFS(std::shared_ptr<RomFSReader>& romfs_file,
bool use_layered_fs = true);

/**
 * Dump the RomFS of the NCCH container to the user folder.
 * @param target_path target path to dump to
 * @return ResultStatus result of function.
 */
Loader::ResultStatus DumpRomFS(const std::string& target_path);

/**
 * Get the override RomFS of the NCCH container
@@ -5,7 +5,7 @@

namespace FileSys {

std::size_t RomFSReader::ReadFile(std::size_t offset, std::size_t length, u8* buffer) {
std::size_t DirectRomFSReader::ReadFile(std::size_t offset, std::size_t length, u8* buffer) {
if (length == 0)
return 0; // Crypto++ does not like zero size buffer
file.Seek(file_offset + offset, SEEK_SET);
@@ -7,23 +7,39 @@

namespace FileSys {

/**
 * Interface for reading RomFS data.
 */
class RomFSReader {
public:
RomFSReader(FileUtil::IOFile&& file, std::size_t file_offset, std::size_t data_size)
virtual ~RomFSReader() = default;

virtual std::size_t GetSize() const = 0;
virtual std::size_t ReadFile(std::size_t offset, std::size_t length, u8* buffer) = 0;
};

/**
 * A RomFS reader that directly reads the RomFS file.
 */
class DirectRomFSReader : public RomFSReader {
public:
DirectRomFSReader(FileUtil::IOFile&& file, std::size_t file_offset, std::size_t data_size)
: is_encrypted(false), file(std::move(file)), file_offset(file_offset),
data_size(data_size) {}

RomFSReader(FileUtil::IOFile&& file, std::size_t file_offset, std::size_t data_size,
DirectRomFSReader(FileUtil::IOFile&& file, std::size_t file_offset, std::size_t data_size,
const std::array<u8, 16>& key, const std::array<u8, 16>& ctr,
std::size_t crypto_offset)
: is_encrypted(true), file(std::move(file)), key(key), ctr(ctr), file_offset(file_offset),
crypto_offset(crypto_offset), data_size(data_size) {}

std::size_t GetSize() const {
~DirectRomFSReader() override = default;

std::size_t GetSize() const override {
return data_size;
}

std::size_t ReadFile(std::size_t offset, std::size_t length, u8* buffer);
std::size_t ReadFile(std::size_t offset, std::size_t length, u8* buffer) override;

private:
bool is_encrypted;
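Since RomFSReader is now a pure interface, other readers can be slotted in beside DirectRomFSReader and LayeredFS. A minimal sketch of a third implementation, not part of this commit (assumes `<algorithm>`, `<cstring>` and `<vector>` are available and that the usual common types are in scope):

```cpp
// An in-memory reader, e.g. for tests: serves RomFS data straight from a byte vector.
class VectorRomFSReader : public FileSys::RomFSReader {
public:
    explicit VectorRomFSReader(std::vector<u8> data_) : data(std::move(data_)) {}

    std::size_t GetSize() const override {
        return data.size();
    }

    std::size_t ReadFile(std::size_t offset, std::size_t length, u8* buffer) override {
        if (offset >= data.size())
            return 0;
        const std::size_t copied = std::min(length, data.size() - offset);
        std::memcpy(buffer, data.data() + offset, copied);
        return copied;
    }

private:
    std::vector<u8> data;
};
```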
@@ -34,7 +50,7 @@ private:
u64 crypto_offset;
u64 data_size;

RomFSReader() = default;
DirectRomFSReader() = default;

template <class Archive>
void serialize(Archive& ar, const unsigned int) {
@@ -121,6 +121,7 @@ constexpr char target_xml[] =
)";

int gdbserver_socket = -1;
bool defer_start = false;

u8 command_buffer[GDB_BUFFER_SIZE];
u32 command_length;

@@ -160,12 +161,16 @@ BreakpointMap breakpoints_write;
} // Anonymous namespace

static Kernel::Thread* FindThreadById(int id) {
const auto& threads = Core::System::GetInstance().Kernel().GetThreadManager().GetThreadList();
u32 num_cores = Core::GetNumCores();
for (u32 i = 0; i < num_cores; ++i) {
const auto& threads =
Core::System::GetInstance().Kernel().GetThreadManager(i).GetThreadList();
for (auto& thread : threads) {
if (thread->GetThreadId() == static_cast<u32>(id)) {
return thread.get();
}
}
}
return nullptr;
}

@@ -414,7 +419,10 @@ static void RemoveBreakpoint(BreakpointType type, VAddr addr) {
Core::System::GetInstance().Memory().WriteBlock(
*Core::System::GetInstance().Kernel().GetCurrentProcess(), bp->second.addr,
bp->second.inst.data(), bp->second.inst.size());
Core::CPU().ClearInstructionCache();
u32 num_cores = Core::GetNumCores();
for (u32 i = 0; i < num_cores; ++i) {
Core::GetCore(i).ClearInstructionCache();
}
}
p.erase(addr);
}

@@ -540,11 +548,14 @@ static void HandleQuery() {
SendReply(target_xml);
} else if (strncmp(query, "fThreadInfo", strlen("fThreadInfo")) == 0) {
std::string val = "m";
u32 num_cores = Core::GetNumCores();
for (u32 i = 0; i < num_cores; ++i) {
const auto& threads =
Core::System::GetInstance().Kernel().GetThreadManager().GetThreadList();
Core::System::GetInstance().Kernel().GetThreadManager(i).GetThreadList();
for (const auto& thread : threads) {
val += fmt::format("{:x},", thread->GetThreadId());
}
}
val.pop_back();
SendReply(val.c_str());
} else if (strncmp(query, "sThreadInfo", strlen("sThreadInfo")) == 0) {

@@ -553,12 +564,15 @@ static void HandleQuery() {
std::string buffer;
buffer += "l<?xml version=\"1.0\"?>";
buffer += "<threads>";
u32 num_cores = Core::GetNumCores();
for (u32 i = 0; i < num_cores; ++i) {
const auto& threads =
Core::System::GetInstance().Kernel().GetThreadManager().GetThreadList();
Core::System::GetInstance().Kernel().GetThreadManager(i).GetThreadList();
for (const auto& thread : threads) {
buffer += fmt::format(R"*(<thread id="{:x}" name="Thread {:x}"></thread>)*",
thread->GetThreadId(), thread->GetThreadId());
}
}
buffer += "</threads>";
SendReply(buffer.c_str());
} else {
@@ -619,9 +633,9 @@ static void SendSignal(Kernel::Thread* thread, u32 signal, bool full = true) {
if (full) {

buffer = fmt::format("T{:02x}{:02x}:{:08x};{:02x}:{:08x};{:02x}:{:08x}", latest_signal,
PC_REGISTER, htonl(Core::CPU().GetPC()), SP_REGISTER,
htonl(Core::CPU().GetReg(SP_REGISTER)), LR_REGISTER,
htonl(Core::CPU().GetReg(LR_REGISTER)));
PC_REGISTER, htonl(Core::GetRunningCore().GetPC()), SP_REGISTER,
htonl(Core::GetRunningCore().GetReg(SP_REGISTER)), LR_REGISTER,
htonl(Core::GetRunningCore().GetReg(LR_REGISTER)));
} else {
buffer = fmt::format("T{:02x}", latest_signal);
}

@@ -782,7 +796,7 @@ static void WriteRegister() {
return SendReply("E01");
}

Core::CPU().LoadContext(current_thread->context);
Core::GetRunningCore().LoadContext(current_thread->context);

SendReply("OK");
}

@@ -812,7 +826,7 @@ static void WriteRegisters() {
}
}

Core::CPU().LoadContext(current_thread->context);
Core::GetRunningCore().LoadContext(current_thread->context);

SendReply("OK");
}

@@ -869,7 +883,7 @@ static void WriteMemory() {
GdbHexToMem(data.data(), len_pos + 1, len);
Core::System::GetInstance().Memory().WriteBlock(
*Core::System::GetInstance().Kernel().GetCurrentProcess(), addr, data.data(), len);
Core::CPU().ClearInstructionCache();
Core::GetRunningCore().ClearInstructionCache();
SendReply("OK");
}

@@ -883,12 +897,12 @@ void Break(bool is_memory_break) {
static void Step() {
if (command_length > 1) {
RegWrite(PC_REGISTER, GdbHexToInt(command_buffer + 1), current_thread);
Core::CPU().LoadContext(current_thread->context);
Core::GetRunningCore().LoadContext(current_thread->context);
}
step_loop = true;
halt_loop = true;
send_trap = true;
Core::CPU().ClearInstructionCache();
Core::GetRunningCore().ClearInstructionCache();
}

bool IsMemoryBreak() {

@@ -904,7 +918,7 @@ static void Continue() {
memory_break = false;
step_loop = false;
halt_loop = false;
Core::CPU().ClearInstructionCache();
Core::GetRunningCore().ClearInstructionCache();
}

/**

@@ -930,7 +944,7 @@ static bool CommitBreakpoint(BreakpointType type, VAddr addr, u32 len) {
Core::System::GetInstance().Memory().WriteBlock(
*Core::System::GetInstance().Kernel().GetCurrentProcess(), addr, btrap.data(),
btrap.size());
Core::CPU().ClearInstructionCache();
Core::GetRunningCore().ClearInstructionCache();
}
p.insert({addr, breakpoint});

@@ -1030,6 +1044,9 @@ static void RemoveBreakpoint() {

void HandlePacket() {
if (!IsConnected()) {
if (defer_start) {
ToggleServer(true);
}
return;
}

@@ -1120,6 +1137,10 @@ void ToggleServer(bool status) {
}
}

void DeferStart() {
defer_start = true;
}

static void Init(u16 port) {
if (!server_enabled) {
// Set the halt loop to false in case the user enabled the gdbstub mid-execution.

@@ -1203,6 +1224,7 @@ void Shutdown() {
if (!server_enabled) {
return;
}
defer_start = false;

LOG_INFO(Debug_GDBStub, "Stopping GDB ...");
if (gdbserver_socket != -1) {
@@ -42,6 +42,13 @@ void ToggleServer(bool status);
/// Start the gdbstub server.
void Init();

/**
 * Defer initialization of the gdbstub to the first packet processing functions.
 * This avoids a case where the gdbstub thread is frozen after initialization
 * and fails to respond in time to packets.
 */
void DeferStart();

/// Stop gdbstub server.
void Shutdown();

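A sketch of how a frontend might use the new deferred start (assuming the usual GDBStub namespace; the surrounding function is invented for illustration):

```cpp
// Instead of bringing the server up immediately, arm it and let the first
// HandlePacket() call on the emulation side perform ToggleServer(true).
void SetupDebugging(bool use_gdbstub) {
    if (use_gdbstub) {
        GDBStub::DeferStart();
    }
}
```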
@@ -83,7 +83,7 @@ bool HandleTable::IsValid(Handle handle) const {

std::shared_ptr<Object> HandleTable::GetGeneric(Handle handle) const {
if (handle == CurrentThread) {
return SharedFrom(kernel.GetThreadManager().GetCurrentThread());
return SharedFrom(kernel.GetCurrentThreadManager().GetCurrentThread());
} else if (handle == CurrentProcess) {
return kernel.GetCurrentProcess();
}
@@ -20,22 +20,30 @@ namespace Kernel {

/// Initialize the kernel
KernelSystem::KernelSystem(Memory::MemorySystem& memory, Core::Timing& timing,
std::function<void()> prepare_reschedule_callback, u32 system_mode)
std::function<void()> prepare_reschedule_callback, u32 system_mode,
u32 num_cores, u8 n3ds_mode)
: memory(memory), timing(timing),
prepare_reschedule_callback(std::move(prepare_reschedule_callback)) {
for (auto i = 0; i < memory_regions.size(); i++) {
memory_regions[i] = std::make_shared<MemoryRegionInfo>();
}
MemoryInit(system_mode);
MemoryInit(system_mode, n3ds_mode);

resource_limits = std::make_unique<ResourceLimitList>(*this);
thread_manager = std::make_unique<ThreadManager>(*this);
for (u32 core_id = 0; core_id < num_cores; ++core_id) {
thread_managers.push_back(std::make_unique<ThreadManager>(*this, core_id));
}
timer_manager = std::make_unique<TimerManager>(timing);
ipc_recorder = std::make_unique<IPCDebugger::Recorder>();
stored_processes.assign(num_cores, nullptr);

next_thread_id = 1;
}

/// Shutdown the kernel
KernelSystem::~KernelSystem() = default;
KernelSystem::~KernelSystem() {
ResetThreadIDs();
};

ResourceLimitList& KernelSystem::ResourceLimit() {
return *resource_limits;
@@ -58,6 +66,15 @@ void KernelSystem::SetCurrentProcess(std::shared_ptr<Process> process) {
SetCurrentMemoryPageTable(process->vm_manager.page_table);
}

void KernelSystem::SetCurrentProcessForCPU(std::shared_ptr<Process> process, u32 core_id) {
if (current_cpu->GetID() == core_id) {
current_process = process;
SetCurrentMemoryPageTable(process->vm_manager.page_table);
} else {
stored_processes[core_id] = process;
}
}

void KernelSystem::SetCurrentMemoryPageTable(std::shared_ptr<Memory::PageTable> page_table) {
memory.SetCurrentPageTable(page_table);
if (current_cpu != nullptr) {
@@ -65,17 +82,39 @@ void KernelSystem::SetCurrentMemoryPageTable(std::shared_ptr<Memory::PageTable>
}
}

void KernelSystem::SetCPU(std::shared_ptr<ARM_Interface> cpu) {
void KernelSystem::SetCPUs(std::vector<std::shared_ptr<ARM_Interface>> cpus) {
ASSERT(cpus.size() == thread_managers.size());
u32 i = 0;
for (const auto& cpu : cpus) {
thread_managers[i++]->SetCPU(*cpu);
}
}

void KernelSystem::SetRunningCPU(std::shared_ptr<ARM_Interface> cpu) {
if (current_process) {
stored_processes[current_cpu->GetID()] = current_process;
}
current_cpu = cpu;
thread_manager->SetCPU(*cpu);
timing.SetCurrentTimer(cpu->GetID());
if (stored_processes[current_cpu->GetID()]) {
SetCurrentProcess(stored_processes[current_cpu->GetID()]);
}
}

ThreadManager& KernelSystem::GetThreadManager() {
return *thread_manager;
ThreadManager& KernelSystem::GetThreadManager(u32 core_id) {
return *thread_managers[core_id];
}

const ThreadManager& KernelSystem::GetThreadManager() const {
return *thread_manager;
const ThreadManager& KernelSystem::GetThreadManager(u32 core_id) const {
return *thread_managers[core_id];
}

ThreadManager& KernelSystem::GetCurrentThreadManager() {
return *thread_managers[current_cpu->GetID()];
}

const ThreadManager& KernelSystem::GetCurrentThreadManager() const {
return *thread_managers[current_cpu->GetID()];
}

TimerManager& KernelSystem::GetTimerManager() {
@@ -106,6 +145,14 @@ void KernelSystem::AddNamedPort(std::string name, std::shared_ptr<ClientPort> po
named_ports.emplace(std::move(name), std::move(port));
}

u32 KernelSystem::NewThreadId() {
return next_thread_id++;
}

void KernelSystem::ResetThreadIDs() {
next_thread_id = 0;
}

template <class Archive>
void KernelSystem::serialize(Archive& ar, const unsigned int file_version) {
ar& memory_regions;
@@ -118,9 +165,14 @@ void KernelSystem::serialize(Archive& ar, const unsigned int file_version) {
ar& next_process_id;
ar& process_list;
ar& current_process;
// NB: core count checked in 'core'
for (auto& thread_manager : thread_managers) {
ar&* thread_manager.get();
}
ar& config_mem_handler;
ar& shared_page_handler;
ar& stored_processes;
ar& next_thread_id;
// Deliberately don't include debugger info to allow debugging through loads
}

@@ -88,7 +88,8 @@ enum class MemoryRegion : u16 {
class KernelSystem {
public:
explicit KernelSystem(Memory::MemorySystem& memory, Core::Timing& timing,
std::function<void()> prepare_reschedule_callback, u32 system_mode);
std::function<void()> prepare_reschedule_callback, u32 system_mode,
u32 num_cores, u8 n3ds_mode);
~KernelSystem();

using PortPair = std::pair<std::shared_ptr<ServerPort>, std::shared_ptr<ClientPort>>;
@@ -214,13 +215,19 @@ public:

std::shared_ptr<Process> GetCurrentProcess() const;
void SetCurrentProcess(std::shared_ptr<Process> process);
void SetCurrentProcessForCPU(std::shared_ptr<Process> process, u32 core_id);

void SetCurrentMemoryPageTable(std::shared_ptr<Memory::PageTable> page_table);

void SetCPU(std::shared_ptr<ARM_Interface> cpu);
void SetCPUs(std::vector<std::shared_ptr<ARM_Interface>> cpu);

ThreadManager& GetThreadManager();
const ThreadManager& GetThreadManager() const;
void SetRunningCPU(std::shared_ptr<ARM_Interface> cpu);

ThreadManager& GetThreadManager(u32 core_id);
const ThreadManager& GetThreadManager(u32 core_id) const;

ThreadManager& GetCurrentThreadManager();
const ThreadManager& GetCurrentThreadManager() const;

TimerManager& GetTimerManager();
const TimerManager& GetTimerManager() const;
@@ -246,6 +253,10 @@ public:
prepare_reschedule_callback();
}

u32 NewThreadId();

void ResetThreadIDs();

/// Map of named ports managed by the kernel, which can be retrieved using the ConnectToPort
std::unordered_map<std::string, std::shared_ptr<ClientPort>> named_ports;

@@ -256,7 +267,7 @@ public:
Core::Timing& timing;

private:
void MemoryInit(u32 mem_type);
void MemoryInit(u32 mem_type, u8 n3ds_mode);

std::function<void()> prepare_reschedule_callback;

@@ -280,14 +291,17 @@ private:
std::vector<std::shared_ptr<Process>> process_list;

std::shared_ptr<Process> current_process;
std::vector<std::shared_ptr<Process>> stored_processes;

std::unique_ptr<ThreadManager> thread_manager;
std::vector<std::unique_ptr<ThreadManager>> thread_managers;

std::shared_ptr<ConfigMem::Handler> config_mem_handler;
std::shared_ptr<SharedPage::Handler> shared_page_handler;

std::unique_ptr<IPCDebugger::Recorder> ipc_recorder;

u32 next_thread_id;

friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const unsigned int file_version);
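To make the per-core plumbing above concrete, here is an illustrative sketch of how a scheduler loop could drive it; the loop shape and `ARM_Interface::Run()` are assumptions, not part of this change:

```cpp
// Rough sketch only: one ThreadManager and one stored process slot exist per core,
// so the runner selects the active core before executing it.
void RunAllCores(Kernel::KernelSystem& kernel,
                 std::vector<std::shared_ptr<ARM_Interface>> cpus) {
    kernel.SetCPUs(cpus);            // binds each ThreadManager to its core's CPU
    for (const auto& cpu : cpus) {
        kernel.SetRunningCPU(cpu);   // stores the old core's process, restores this one's
        cpu->Run();                  // assumed ARM_Interface entry point
    }
}
```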
@@ -19,6 +19,7 @@
#include "core/hle/kernel/vm_manager.h"
#include "core/hle/result.h"
#include "core/memory.h"
#include "core/settings.h"

////////////////////////////////////////////////////////////////////////////////////////////////////

@@ -40,11 +41,32 @@ static const u32 memory_region_sizes[8][3] = {
{0x0B200000, 0x02E00000, 0x02000000}, // 7
};

void KernelSystem::MemoryInit(u32 mem_type) {
// TODO(yuriks): On the n3DS, all o3DS configurations (<=5) are forced to 6 instead.
ASSERT_MSG(mem_type <= 5, "New 3DS memory configuration aren't supported yet!");
namespace MemoryMode {
enum N3DSMode : u8 {
Mode6 = 1,
Mode7 = 2,
Mode6_2 = 3,
};
}

void KernelSystem::MemoryInit(u32 mem_type, u8 n3ds_mode) {
ASSERT(mem_type != 1);

const bool is_new_3ds = Settings::values.is_new_3ds;
u32 reported_mem_type = mem_type;
if (is_new_3ds) {
if (n3ds_mode == MemoryMode::Mode6 || n3ds_mode == MemoryMode::Mode6_2) {
mem_type = 6;
reported_mem_type = 6;
} else if (n3ds_mode == MemoryMode::Mode7) {
mem_type = 7;
reported_mem_type = 7;
} else {
// On the N3ds, all O3ds configurations (<=5) are forced to 6 instead.
mem_type = 6;
}
}

// The kernel allocation regions (APPLICATION, SYSTEM and BASE) are laid out in sequence, with
// the sizes specified in the memory_region_sizes table.
VAddr base = 0;
@@ -55,14 +77,12 @@ void KernelSystem::MemoryInit(u32 mem_type) {
}

// We must've allocated the entire FCRAM by the end
ASSERT(base == Memory::FCRAM_SIZE);
ASSERT(base == (is_new_3ds ? Memory::FCRAM_N3DS_SIZE : Memory::FCRAM_SIZE));

config_mem_handler = std::make_shared<ConfigMem::Handler>();
auto& config_mem = config_mem_handler->GetConfigMem();
config_mem.app_mem_type = mem_type;
// app_mem_malloc does not always match the configured size for memory_region[0]: in case the
// n3DS type override is in effect it reports the size the game expects, not the real one.
config_mem.app_mem_alloc = memory_region_sizes[mem_type][0];
config_mem.app_mem_type = reported_mem_type;
config_mem.app_mem_alloc = memory_region_sizes[reported_mem_type][0];
config_mem.sys_mem_alloc = memory_regions[1]->size;
config_mem.base_mem_alloc = memory_regions[2]->size;

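A short walk-through of the mode selection above (the title is hypothetical; the indices refer to the memory_region_sizes table in this file):

```cpp
// o3DS title requesting mem_type 2, run with is_new_3ds = true and n3ds_mode = 0:
//   mem_type          -> 6   (allocation uses memory_region_sizes[6])
//   reported_mem_type -> 2   (config_mem.app_mem_type / app_mem_alloc keep the o3DS view)
// Same title whose exheader sets MemoryMode::Mode7:
//   mem_type == reported_mem_type == 7, so the reported and real sizes match.
```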
@@ -39,7 +39,7 @@ std::shared_ptr<Mutex> KernelSystem::CreateMutex(bool initial_locked, std::strin

// Acquire mutex with current thread if initialized as locked
if (initial_locked)
mutex->Acquire(thread_manager->GetCurrentThread());
mutex->Acquire(thread_managers[current_cpu->GetID()]->GetCurrentThread());

return mutex;
}
|
@@ -70,7 +70,7 @@ Handler::Handler(Core::Timing& timing) : timing(timing) {
using namespace std::placeholders;
update_time_event = timing.RegisterEvent("SharedPage::UpdateTimeCallback",
std::bind(&Handler::UpdateTimeCallback, this, _1, _2));
timing.ScheduleEvent(0, update_time_event);
timing.ScheduleEvent(0, update_time_event, 0, 0);

float slidestate = Settings::values.factor_3d / 100.0f;
shared_page.sliderstate_3d = static_cast<float_le>(slidestate);
@@ -280,12 +280,12 @@ void SVC::ExitProcess() {
current_process->status = ProcessStatus::Exited;

// Stop all the process threads that are currently waiting for objects.
auto& thread_list = kernel.GetThreadManager().GetThreadList();
auto& thread_list = kernel.GetCurrentThreadManager().GetThreadList();
for (auto& thread : thread_list) {
if (thread->owner_process != current_process)
continue;

if (thread.get() == kernel.GetThreadManager().GetCurrentThread())
if (thread.get() == kernel.GetCurrentThreadManager().GetCurrentThread())
continue;

// TODO(Subv): When are the other running/ready threads terminated?
@@ -297,7 +297,7 @@ void SVC::ExitProcess() {
}

// Kill the current thread
kernel.GetThreadManager().GetCurrentThread()->Stop();
kernel.GetCurrentThreadManager().GetCurrentThread()->Stop();

system.PrepareReschedule();
}
@@ -388,7 +388,7 @@ ResultCode SVC::SendSyncRequest(Handle handle) {

system.PrepareReschedule();

auto thread = SharedFrom(kernel.GetThreadManager().GetCurrentThread());
auto thread = SharedFrom(kernel.GetCurrentThreadManager().GetCurrentThread());

if (kernel.GetIPCRecorder().IsEnabled()) {
kernel.GetIPCRecorder().RegisterRequest(session, thread);
@@ -476,7 +476,7 @@ private:
/// Wait for a handle to synchronize, timeout after the specified nanoseconds
ResultCode SVC::WaitSynchronization1(Handle handle, s64 nano_seconds) {
auto object = kernel.GetCurrentProcess()->handle_table.Get<WaitObject>(handle);
Thread* thread = kernel.GetThreadManager().GetCurrentThread();
Thread* thread = kernel.GetCurrentThreadManager().GetCurrentThread();

if (object == nullptr)
return ERR_INVALID_HANDLE;
@@ -514,7 +514,7 @@ ResultCode SVC::WaitSynchronization1(Handle handle, s64 nano_seconds) {
/// Wait for the given handles to synchronize, timeout after the specified nanoseconds
ResultCode SVC::WaitSynchronizationN(s32* out, VAddr handles_address, s32 handle_count,
bool wait_all, s64 nano_seconds) {
Thread* thread = kernel.GetThreadManager().GetCurrentThread();
Thread* thread = kernel.GetCurrentThreadManager().GetCurrentThread();

if (!Memory::IsValidVirtualAddress(*kernel.GetCurrentProcess(), handles_address))
return ERR_INVALID_POINTER;
@@ -684,7 +684,7 @@ ResultCode SVC::ReplyAndReceive(s32* index, VAddr handles_address, s32 handle_co

// We are also sending a command reply.
// Do not send a reply if the command id in the command buffer is 0xFFFF.
Thread* thread = kernel.GetThreadManager().GetCurrentThread();
Thread* thread = kernel.GetCurrentThreadManager().GetCurrentThread();
u32 cmd_buff_header = memory.Read32(thread->GetCommandBufferAddress());
IPC::Header header{cmd_buff_header};
if (reply_target != 0 && header.command_id != 0xFFFF) {
@@ -791,7 +791,7 @@ ResultCode SVC::ArbitrateAddress(Handle handle, u32 address, u32 type, u32 value
return ERR_INVALID_HANDLE;

auto res =
arbiter->ArbitrateAddress(SharedFrom(kernel.GetThreadManager().GetCurrentThread()),
arbiter->ArbitrateAddress(SharedFrom(kernel.GetCurrentThreadManager().GetCurrentThread()),
static_cast<ArbitrationType>(type), address, value, nanoseconds);

// TODO(Subv): Identify in which specific cases this call should cause a reschedule.
@@ -912,14 +912,19 @@ ResultCode SVC::CreateThread(Handle* out_handle, u32 entry_point, u32 arg, VAddr
break;
case ThreadProcessorIdAll:
LOG_INFO(Kernel_SVC,
"Newly created thread is allowed to be run in any Core, unimplemented.");
"Newly created thread is allowed to be run in any Core, for now run in core 0.");
processor_id = ThreadProcessorId0;
break;
case ThreadProcessorId1:
LOG_ERROR(Kernel_SVC,
"Newly created thread must run in the SysCore (Core1), unimplemented.");
case ThreadProcessorId2:
case ThreadProcessorId3:
// TODO: Check and log for: When processorid==0x2 and the process is not a BASE mem-region
// process, exheader kernel-flags bitmask 0x2000 must be set (otherwise error 0xD9001BEA is
// returned). When processorid==0x3 and the process is not a BASE mem-region process, error
// 0xD9001BEA is returned. These are the only restriction checks done by the kernel for
// processorid.
break;
default:
// TODO(bunnei): Implement support for other processor IDs
ASSERT_MSG(false, "Unsupported thread processor ID: {}", processor_id);
break;
}
@@ -945,9 +950,9 @@ ResultCode SVC::CreateThread(Handle* out_handle, u32 entry_point, u32 arg, VAddr

/// Called when a thread exits
void SVC::ExitThread() {
LOG_TRACE(Kernel_SVC, "called, pc=0x{:08X}", system.CPU().GetPC());
LOG_TRACE(Kernel_SVC, "called, pc=0x{:08X}", system.GetRunningCore().GetPC());

kernel.GetThreadManager().ExitCurrentThread();
kernel.GetCurrentThreadManager().ExitCurrentThread();
system.PrepareReschedule();
}

@@ -993,7 +998,7 @@ ResultCode SVC::SetThreadPriority(Handle handle, u32 priority) {
/// Create a mutex
ResultCode SVC::CreateMutex(Handle* out_handle, u32 initial_locked) {
std::shared_ptr<Mutex> mutex = kernel.CreateMutex(initial_locked != 0);
mutex->name = fmt::format("mutex-{:08x}", system.CPU().GetReg(14));
mutex->name = fmt::format("mutex-{:08x}", system.GetRunningCore().GetReg(14));
CASCADE_RESULT(*out_handle, kernel.GetCurrentProcess()->handle_table.Create(std::move(mutex)));

LOG_TRACE(Kernel_SVC, "called initial_locked={} : created handle=0x{:08X}",
@@ -1010,7 +1015,7 @@ ResultCode SVC::ReleaseMutex(Handle handle) {
if (mutex == nullptr)
return ERR_INVALID_HANDLE;

return mutex->Release(kernel.GetThreadManager().GetCurrentThread());
return mutex->Release(kernel.GetCurrentThreadManager().GetCurrentThread());
}

/// Get the ID of the specified process
@@ -1060,7 +1065,7 @@ ResultCode SVC::GetThreadId(u32* thread_id, Handle handle) {
ResultCode SVC::CreateSemaphore(Handle* out_handle, s32 initial_count, s32 max_count) {
CASCADE_RESULT(std::shared_ptr<Semaphore> semaphore,
kernel.CreateSemaphore(initial_count, max_count));
semaphore->name = fmt::format("semaphore-{:08x}", system.CPU().GetReg(14));
semaphore->name = fmt::format("semaphore-{:08x}", system.GetRunningCore().GetReg(14));
CASCADE_RESULT(*out_handle,
kernel.GetCurrentProcess()->handle_table.Create(std::move(semaphore)));

@@ -1130,8 +1135,9 @@ ResultCode SVC::QueryMemory(MemoryInfo* memory_info, PageInfo* page_info, u32 ad

/// Create an event
ResultCode SVC::CreateEvent(Handle* out_handle, u32 reset_type) {
std::shared_ptr<Event> evt = kernel.CreateEvent(
static_cast<ResetType>(reset_type), fmt::format("event-{:08x}", system.CPU().GetReg(14)));
std::shared_ptr<Event> evt =
kernel.CreateEvent(static_cast<ResetType>(reset_type),
fmt::format("event-{:08x}", system.GetRunningCore().GetReg(14)));
CASCADE_RESULT(*out_handle, kernel.GetCurrentProcess()->handle_table.Create(std::move(evt)));

LOG_TRACE(Kernel_SVC, "called reset_type=0x{:08X} : created handle=0x{:08X}", reset_type,
@@ -1173,8 +1179,9 @@ ResultCode SVC::ClearEvent(Handle handle) {

/// Creates a timer
ResultCode SVC::CreateTimer(Handle* out_handle, u32 reset_type) {
std::shared_ptr<Timer> timer = kernel.CreateTimer(
static_cast<ResetType>(reset_type), fmt ::format("timer-{:08x}", system.CPU().GetReg(14)));
std::shared_ptr<Timer> timer =
kernel.CreateTimer(static_cast<ResetType>(reset_type),
fmt ::format("timer-{:08x}", system.GetRunningCore().GetReg(14)));
CASCADE_RESULT(*out_handle, kernel.GetCurrentProcess()->handle_table.Create(std::move(timer)));

LOG_TRACE(Kernel_SVC, "called reset_type=0x{:08X} : created handle=0x{:08X}", reset_type,
@@ -1228,7 +1235,7 @@ ResultCode SVC::CancelTimer(Handle handle) {
void SVC::SleepThread(s64 nanoseconds) {
LOG_TRACE(Kernel_SVC, "called nanoseconds={}", nanoseconds);

ThreadManager& thread_manager = kernel.GetThreadManager();
ThreadManager& thread_manager = kernel.GetCurrentThreadManager();

// Don't attempt to yield execution if there are no available threads to run,
// this way we avoid a useless reschedule to the idle thread.
@@ -1246,10 +1253,11 @@ void SVC::SleepThread(s64 nanoseconds) {

/// This returns the total CPU ticks elapsed since the CPU was powered-on
s64 SVC::GetSystemTick() {
s64 result = system.CoreTiming().GetTicks();
// TODO: Use globalTicks here?
s64 result = system.GetRunningCore().GetTimer()->GetTicks();
// Advance time to defeat dumb games (like Cubic Ninja) that busy-wait for the frame to end.
// Measured time between two calls on a 9.2 o3DS with Ninjhax 1.1b
system.CoreTiming().AddTicks(150);
system.GetRunningCore().GetTimer()->AddTicks(150);
return result;
}

@@ -1611,11 +1619,11 @@ void SVC::CallSVC(u32 immediate) {
SVC::SVC(Core::System& system) : system(system), kernel(system.Kernel()), memory(system.Memory()) {}

u32 SVC::GetReg(std::size_t n) {
return system.CPU().GetReg(static_cast<int>(n));
return system.GetRunningCore().GetReg(static_cast<int>(n));
}

void SVC::SetReg(std::size_t n, u32 value) {
system.CPU().SetReg(static_cast<int>(n), value);
system.GetRunningCore().SetReg(static_cast<int>(n), value);
}

SVCContext::SVCContext(Core::System& system) : impl(std::make_unique<SVC>(system)) {}
@@ -62,13 +62,10 @@ void Thread::Acquire(Thread* thread) {
ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
}

u32 ThreadManager::NewThreadId() {
return next_thread_id++;
}

Thread::Thread(KernelSystem& kernel)
: WaitObject(kernel), context(kernel.GetThreadManager().NewContext()),
thread_manager(kernel.GetThreadManager()) {}
Thread::Thread(KernelSystem& kernel, u32 core_id)
: WaitObject(kernel), context(kernel.GetThreadManager(core_id).NewContext()),
core_id(core_id),
thread_manager(kernel.GetThreadManager(core_id)) {}
Thread::~Thread() {}

Thread* ThreadManager::GetCurrentThread() const {
@@ -113,7 +110,7 @@ void ThreadManager::SwitchContext(Thread* new_thread) {

// Save context for previous thread
if (previous_thread) {
previous_thread->last_running_ticks = timing.GetTicks();
previous_thread->last_running_ticks = timing.GetGlobalTicks();
cpu->SaveContext(previous_thread->context);

if (previous_thread->status == ThreadStatus::Running) {
@@ -140,7 +137,7 @@ void ThreadManager::SwitchContext(Thread* new_thread) {
new_thread->status = ThreadStatus::Running;

if (previous_process != current_thread->owner_process) {
kernel.SetCurrentProcess(current_thread->owner_process);
kernel.SetCurrentProcessForCPU(current_thread->owner_process, cpu->GetID());
}

cpu->LoadContext(new_thread->context);
@@ -153,7 +150,7 @@ void ThreadManager::SwitchContext(Thread* new_thread) {
}

Thread* ThreadManager::PopNextReadyThread() {
Thread* next;
Thread* next = nullptr;
Thread* thread = GetCurrentThread();

if (thread && thread->status == ThreadStatus::Running) {
@@ -337,22 +334,22 @@ ResultVal<std::shared_ptr<Thread>> KernelSystem::CreateThread(
ErrorSummary::InvalidArgument, ErrorLevel::Permanent);
}

auto thread{std::make_shared<Thread>(*this)};
auto thread{std::make_shared<Thread>(*this, processor_id)};

thread_manager->thread_list.push_back(thread);
thread_manager->ready_queue.prepare(priority);
thread_managers[processor_id]->thread_list.push_back(thread);
thread_managers[processor_id]->ready_queue.prepare(priority);

thread->thread_id = thread_manager->NewThreadId();
thread->thread_id = NewThreadId();
thread->status = ThreadStatus::Dormant;
thread->entry_point = entry_point;
thread->stack_top = stack_top;
thread->nominal_priority = thread->current_priority = priority;
thread->last_running_ticks = timing.GetTicks();
thread->last_running_ticks = timing.GetGlobalTicks();
thread->processor_id = processor_id;
thread->wait_objects.clear();
thread->wait_address = 0;
thread->name = std::move(name);
thread_manager->wakeup_callback_table[thread->thread_id] = thread.get();
thread_managers[processor_id]->wakeup_callback_table[thread->thread_id] = thread.get();
thread->owner_process = owner_process;

// Find the next available TLS index, and mark it as used
@@ -397,7 +394,7 @@ ResultVal<std::shared_ptr<Thread>> KernelSystem::CreateThread(
// to initialize the context
ResetThreadContext(thread->context, stack_top, entry_point, arg);

thread_manager->ready_queue.push_back(thread->current_priority, thread.get());
thread_managers[processor_id]->ready_queue.push_back(thread->current_priority, thread.get());
thread->status = ThreadStatus::Ready;

return MakeResult<std::shared_ptr<Thread>>(std::move(thread));
@@ -463,6 +460,9 @@ void ThreadManager::Reschedule() {
LOG_TRACE(Kernel, "context switch {} -> idle", cur->GetObjectId());
} else if (next) {
LOG_TRACE(Kernel, "context switch idle -> {}", next->GetObjectId());
} else {
LOG_TRACE(Kernel, "context switch idle -> idle, do nothing");
return;
}

SwitchContext(next);
@@ -489,11 +489,10 @@ VAddr Thread::GetCommandBufferAddress() const {
return GetTLSAddress() + command_header_offset;
}

ThreadManager::ThreadManager(Kernel::KernelSystem& kernel) : kernel(kernel) {
ThreadWakeupEventType =
kernel.timing.RegisterEvent("ThreadWakeupCallback", [this](u64 thread_id, s64 cycle_late) {
ThreadWakeupCallback(thread_id, cycle_late);
});
ThreadManager::ThreadManager(Kernel::KernelSystem& kernel, u32 core_id) : kernel(kernel) {
ThreadWakeupEventType = kernel.timing.RegisterEvent(
"ThreadWakeupCallback_" + std::to_string(core_id),
[this](u64 thread_id, s64 cycle_late) { ThreadWakeupCallback(thread_id, cycle_late); });
}

ThreadManager::~ThreadManager() {
@@ -38,7 +38,9 @@ enum ThreadProcessorId : s32 {
ThreadProcessorIdAll = -1, ///< Run thread on either core
ThreadProcessorId0 = 0, ///< Run thread on core 0 (AppCore)
ThreadProcessorId1 = 1, ///< Run thread on core 1 (SysCore)
ThreadProcessorIdMax = 2, ///< Processor ID must be less than this
ThreadProcessorId2 = 2, ///< Run thread on core 2 (additional n3ds core)
ThreadProcessorId3 = 3, ///< Run thread on core 3 (additional n3ds core)
ThreadProcessorIdMax = 4, ///< Processor ID must be less than this
};

enum class ThreadStatus {
@@ -75,15 +77,9 @@ private:

class ThreadManager {
public:
explicit ThreadManager(Kernel::KernelSystem& kernel);
explicit ThreadManager(Kernel::KernelSystem& kernel, u32 core_id);
~ThreadManager();

/**
 * Creates a new thread ID
 * @return The new thread ID
 */
u32 NewThreadId();

/**
 * Gets the current thread
 */
@@ -150,7 +146,6 @@ private:
Kernel::KernelSystem& kernel;
ARM_Interface* cpu;

u32 next_thread_id = 1;
std::shared_ptr<Thread> current_thread;
Common::ThreadQueueList<Thread*, ThreadPrioLowest + 1> ready_queue;
std::unordered_map<u64, Thread*> wakeup_callback_table;
@@ -167,7 +162,6 @@ private:
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const unsigned int file_version) {
ar& next_thread_id;
ar& current_thread;
ar& ready_queue;
ar& wakeup_callback_table;
@@ -177,7 +171,7 @@ private:

class Thread final : public WaitObject {
public:
explicit Thread(KernelSystem&);
explicit Thread(KernelSystem&, u32 core_id);
~Thread() override;

std::string GetName() const override {
@@ -329,6 +323,8 @@ public:
// available. In case of a timeout, the object will be nullptr.
std::shared_ptr<WakeupCallback> wakeup_callback;

const u32 core_id;

private:
ThreadManager& thread_manager;

@@ -351,4 +347,20 @@ std::shared_ptr<Thread> SetupMainThread(KernelSystem& kernel, u32 entry_point, u
} // namespace Kernel

BOOST_CLASS_EXPORT_KEY(Kernel::Thread)
CONSTRUCT_KERNEL_OBJECT(Kernel::Thread)

namespace boost::serialization {

template <class Archive>
inline void save_construct_data(Archive& ar, const Kernel::Thread* t,
const unsigned int file_version) {
ar << t->core_id;
}

template <class Archive>
inline void load_construct_data(Archive& ar, Kernel::Thread* t, const unsigned int file_version) {
u32 core_id;
ar >> core_id;
::new (t) Kernel::Thread(Core::Global<Kernel::KernelSystem>(), core_id);
}

} // namespace boost::serialization
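For reference, the construct-data pair above is how Boost.Serialization feeds the saved core_id back into the constructor before normal deserialization runs; roughly:

```cpp
// Order of operations when a Thread is loaded from a savestate (illustrative):
//   1. load_construct_data(ar, t, version)  - reads the core_id written by save_construct_data
//   2. ::new (t) Thread(kernel, core_id)    - placement-new, so the const core_id member is set
//   3. Thread::serialize(ar, version)       - restores the remaining, non-const state
```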
@@ -6,9 +6,7 @@

#include <array>
#include <atomic>
#ifndef _MSC_VER
#include <cstddef>
#endif
#include <memory>
#include "common/bit_field.h"
#include "common/common_funcs.h"
@@ -177,10 +175,6 @@ struct GyroscopeCalibrateParam {
} x, y, z;
};

// TODO: MSVC does not support using offsetof() on non-static data members even though this
// is technically allowed since C++11. This macro should be enabled once MSVC adds
// support for that.
#ifndef _MSC_VER
#define ASSERT_REG_POSITION(field_name, position) \
static_assert(offsetof(SharedMem, field_name) == position * 4, \
"Field " #field_name " has invalid position")
@@ -189,7 +183,6 @@ ASSERT_REG_POSITION(pad.index_reset_ticks, 0x0);
ASSERT_REG_POSITION(touch.index_reset_ticks, 0x2A);

#undef ASSERT_REG_POSITION
#endif // !defined(_MSC_VER)

struct DirectionState {
bool up;
@@ -2,9 +2,14 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <atomic>
#ifdef ENABLE_WEB_SERVICE
#include <LUrlParser.h>
#endif
#include <cryptopp/aes.h>
#include <cryptopp/modes.h>
#include "common/archives.h"
#include "common/assert.h"
#include "core/core.h"
#include "core/file_sys/archive_ncch.h"
#include "core/file_sys/file_backend.h"
@@ -52,6 +57,82 @@ const ResultCode ERROR_WRONG_CERT_HANDLE = // 0xD8A0A0C9
const ResultCode ERROR_CERT_ALREADY_SET = // 0xD8A0A03D
ResultCode(61, ErrorModule::HTTP, ErrorSummary::InvalidState, ErrorLevel::Permanent);

void Context::MakeRequest() {
ASSERT(state == RequestState::NotStarted);

#ifdef ENABLE_WEB_SERVICE
LUrlParser::clParseURL parsedUrl = LUrlParser::clParseURL::ParseURL(url);
int port;
std::unique_ptr<httplib::Client> client;
if (parsedUrl.m_Scheme == "http") {
if (!parsedUrl.GetPort(&port)) {
port = 80;
}
// TODO(B3N30): Support for setting timeout
// Figure out what the default timeout on 3DS is
client = std::make_unique<httplib::Client>(parsedUrl.m_Host.c_str(), port);
} else {
if (!parsedUrl.GetPort(&port)) {
port = 443;
}
// TODO(B3N30): Support for setting timeout
// Figure out what the default timeout on 3DS is

auto ssl_client = std::make_unique<httplib::SSLClient>(parsedUrl.m_Host, port);
SSL_CTX* ctx = ssl_client->ssl_context();
client = std::move(ssl_client);

if (auto client_cert = ssl_config.client_cert_ctx.lock()) {
SSL_CTX_use_certificate_ASN1(ctx, client_cert->certificate.size(),
client_cert->certificate.data());
SSL_CTX_use_PrivateKey_ASN1(EVP_PKEY_RSA, ctx, client_cert->private_key.data(),
client_cert->private_key.size());
}

// TODO(B3N30): Check for SSLOptions-Bits and set the verify method accordingly
// https://www.3dbrew.org/wiki/SSL_Services#SSLOpt
// Hack: Since for now RootCerts are not implemented we set the VerifyMode to None.
SSL_CTX_set_verify(ctx, SSL_VERIFY_NONE, NULL);
}

state = RequestState::InProgress;

static const std::unordered_map<RequestMethod, std::string> request_method_strings{
{RequestMethod::Get, "GET"}, {RequestMethod::Post, "POST"},
{RequestMethod::Head, "HEAD"}, {RequestMethod::Put, "PUT"},
{RequestMethod::Delete, "DELETE"}, {RequestMethod::PostEmpty, "POST"},
{RequestMethod::PutEmpty, "PUT"},
};

httplib::Request request;
request.method = request_method_strings.at(method);
request.path = url;
// TODO(B3N30): Add post data body
request.progress = [this](u64 current, u64 total) -> bool {
// TODO(B3N30): Is there a state that shows response header are available
current_download_size_bytes = current;
total_download_size_bytes = total;
return true;
};

for (const auto& header : headers) {
request.headers.emplace(header.name, header.value);
}

if (!client->send(request, response)) {
LOG_ERROR(Service_HTTP, "Request failed");
state = RequestState::TimedOut;
} else {
LOG_DEBUG(Service_HTTP, "Request successful");
// TODO(B3N30): Verify this state on HW
state = RequestState::ReadyToDownloadContent;
}
#else
LOG_ERROR(Service_HTTP, "Tried to make request but WebServices is not enabled in this build");
state = RequestState::TimedOut;
#endif
}

void HTTP_C::Initialize(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp(ctx, 0x1, 1, 4);
const u32 shmem_size = rp.Pop<u32>();
@@ -156,7 +237,15 @@ void HTTP_C::BeginRequest(Kernel::HLERequestContext& ctx) {
auto itr = contexts.find(context_handle);
ASSERT(itr != contexts.end());

// TODO(B3N30): Make the request
// On a 3DS BeginRequest and BeginRequestAsync will push the Request to a worker queue.
// You can only enqueue 8 requests at the same time.
// trying to enqueue any more will either fail (BeginRequestAsync), or block (BeginRequest)
// Note that you only can have 8 Contexts at a time. So this difference shouldn't matter
// Then there are 3? worker threads that pop the requests from the queue and send them
// For now make every request async in it's own thread.

itr->second.request_future =
std::async(std::launch::async, &Context::MakeRequest, std::ref(itr->second));

IPC::RequestBuilder rb = rp.MakeBuilder(1, 0);
rb.Push(RESULT_SUCCESS);
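Because the request now runs on a std::async task, any later command that inspects a Context from the service thread has to go through its atomic members (declared in the header changes below); everything else must wait on the stored future. A hypothetical helper, for illustration only:

```cpp
// Not part of this change: polling progress of an in-flight request safely.
void ReportProgress(Context& ctx) {
    if (ctx.state.load() == RequestState::InProgress) {
        LOG_DEBUG(Service_HTTP, "{}/{} bytes", ctx.current_download_size_bytes.load(),
                  ctx.total_download_size_bytes.load());
    } else if (ctx.request_future.valid()) {
        ctx.request_future.wait(); // blocks until MakeRequest() has finished
    }
}
```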
@@ -201,7 +290,15 @@ void HTTP_C::BeginRequestAsync(Kernel::HLERequestContext& ctx) {
auto itr = contexts.find(context_handle);
ASSERT(itr != contexts.end());

// TODO(B3N30): Make the request
// On a 3DS BeginRequest and BeginRequestAsync will push the Request to a worker queue.
// You can only enqueue 8 requests at the same time.
// trying to enqueue any more will either fail (BeginRequestAsync), or block (BeginRequest)
// Note that you only can have 8 Contexts at a time. So this difference shouldn't matter
// Then there are 3? worker threads that pop the requests from the queue and send them
// For now make every request async in it's own thread.

itr->second.request_future =
std::async(std::launch::async, &Context::MakeRequest, std::ref(itr->second));

IPC::RequestBuilder rb = rp.MakeBuilder(1, 0);
rb.Push(RESULT_SUCCESS);
@@ -264,7 +361,7 @@ void HTTP_C::CreateContext(Kernel::HLERequestContext& ctx) {
return;
}

contexts.emplace(++context_counter, Context());
contexts.try_emplace(++context_counter);
contexts[context_counter].url = std::move(url);
contexts[context_counter].method = method;
contexts[context_counter].state = RequestState::NotStarted;
@ -311,10 +408,9 @@ void HTTP_C::CloseContext(Kernel::HLERequestContext& ctx) {
    }

    // TODO(Subv): What happens if you try to close a context that's currently being used?
    ASSERT(itr->second.state == RequestState::NotStarted);

    // TODO(Subv): Make sure that only the session that created the context can close it.

    // Note that this will block if a request is still in progress
    contexts.erase(itr);
    session_data->num_http_contexts--;
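The blocking mentioned in the note above comes from the request future: a std::future returned by std::async(std::launch::async, ...) waits for its task in the destructor, so erasing a context whose request is still running stalls until that request finishes. A self-contained illustration (not part of this commit):

// Standard-library behaviour demo, not code from this commit.
#include <chrono>
#include <future>
#include <thread>

int main() {
    {
        auto task = std::async(std::launch::async, [] {
            std::this_thread::sleep_for(std::chrono::milliseconds(100));
        });
    } // ~future blocks here until the asynchronous task has completed

    return 0;
}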
@ -4,6 +4,7 @@

#pragma once

#include <future>
#include <memory>
#include <string>
#include <unordered_map>

@ -15,6 +16,12 @@
#include <boost/serialization/unordered_map.hpp>
#include <boost/serialization/vector.hpp>
#include <boost/serialization/weak_ptr.hpp>
#ifdef ENABLE_WEB_SERVICE
#if defined(__ANDROID__)
#include <ifaddrs.h>
#endif
#include <httplib.h>
#endif
#include "core/hle/kernel/shared_memory.h"
#include "core/hle/service/service.h"

@ -113,8 +120,7 @@ public:
    Context(const Context&) = delete;
    Context& operator=(const Context&) = delete;

    Context(Context&& other) = default;
    Context& operator=(Context&&) = default;
    void MakeRequest();

    struct Proxy {
        std::string url;
@ -195,14 +201,21 @@ public:
    u32 session_id;
    std::string url;
    RequestMethod method;
    RequestState state = RequestState::NotStarted;
    boost::optional<Proxy> proxy;
    boost::optional<BasicAuth> basic_auth;
    std::atomic<RequestState> state = RequestState::NotStarted;
    std::optional<Proxy> proxy;
    std::optional<BasicAuth> basic_auth;
    SSLConfig ssl_config{};
    u32 socket_buffer_size;
    std::vector<RequestHeader> headers;
    std::vector<PostData> post_data;

    std::future<void> request_future;
    std::atomic<u64> current_download_size_bytes;
    std::atomic<u64> total_download_size_bytes;
#ifdef ENABLE_WEB_SERVICE
    httplib::Response response;
#endif

private:
    template <class Archive>
    void serialize(Archive& ar, const unsigned int) {

@ -219,6 +232,7 @@ private:
        ar& post_data;
    }
    friend class boost::serialization::access;
};

struct SessionData : public Kernel::SessionRequestHandler::SessionDataBase {
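Because MakeRequest now runs on a worker thread through request_future while the service thread polls progress, the shared fields above are std::atomic. A minimal, self-contained sketch of that pattern (hypothetical names, not part of this commit):

// Hypothetical sketch: one thread updates progress counters while another polls them,
// which is the reason for std::atomic rather than plain integers.
#include <atomic>
#include <cstdint>
#include <future>

struct Progress {
    std::atomic<std::uint64_t> current{0};
    std::atomic<std::uint64_t> total{0};
};

std::future<void> StartFakeDownload(Progress& progress) {
    return std::async(std::launch::async, [&progress] {
        constexpr std::uint64_t total = 1000;
        progress.total = total;
        for (std::uint64_t done = 0; done <= total; done += 100) {
            progress.current = done; // visible to the polling thread without extra locking
        }
    });
}

// A poller (e.g. a status query handler) would simply read progress.current.load()
// and progress.total.load() while the future is still running.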
@ -55,7 +55,7 @@ VAddr CROHelper::SegmentTagToAddress(SegmentTag segment_tag) const {
|
|||
return 0;
|
||||
|
||||
SegmentEntry entry;
|
||||
GetEntry(memory, segment_tag.segment_index, entry);
|
||||
GetEntry(system.Memory(), segment_tag.segment_index, entry);
|
||||
|
||||
if (segment_tag.offset_into_segment >= entry.size)
|
||||
return 0;
|
||||
|
@ -71,12 +71,12 @@ ResultCode CROHelper::ApplyRelocation(VAddr target_address, RelocationType reloc
|
|||
break;
|
||||
case RelocationType::AbsoluteAddress:
|
||||
case RelocationType::AbsoluteAddress2:
|
||||
memory.Write32(target_address, symbol_address + addend);
|
||||
cpu.InvalidateCacheRange(target_address, sizeof(u32));
|
||||
system.Memory().Write32(target_address, symbol_address + addend);
|
||||
system.InvalidateCacheRange(target_address, sizeof(u32));
|
||||
break;
|
||||
case RelocationType::RelativeAddress:
|
||||
memory.Write32(target_address, symbol_address + addend - target_future_address);
|
||||
cpu.InvalidateCacheRange(target_address, sizeof(u32));
|
||||
system.Memory().Write32(target_address, symbol_address + addend - target_future_address);
|
||||
system.InvalidateCacheRange(target_address, sizeof(u32));
|
||||
break;
|
||||
case RelocationType::ThumbBranch:
|
||||
case RelocationType::ArmBranch:
|
||||
|
@ -98,8 +98,8 @@ ResultCode CROHelper::ClearRelocation(VAddr target_address, RelocationType reloc
|
|||
case RelocationType::AbsoluteAddress:
|
||||
case RelocationType::AbsoluteAddress2:
|
||||
case RelocationType::RelativeAddress:
|
||||
memory.Write32(target_address, 0);
|
||||
cpu.InvalidateCacheRange(target_address, sizeof(u32));
|
||||
system.Memory().Write32(target_address, 0);
|
||||
system.InvalidateCacheRange(target_address, sizeof(u32));
|
||||
break;
|
||||
case RelocationType::ThumbBranch:
|
||||
case RelocationType::ArmBranch:
|
||||
|
@ -121,7 +121,8 @@ ResultCode CROHelper::ApplyRelocationBatch(VAddr batch, u32 symbol_address, bool
|
|||
VAddr relocation_address = batch;
|
||||
while (true) {
|
||||
RelocationEntry relocation;
|
||||
memory.ReadBlock(process, relocation_address, &relocation, sizeof(RelocationEntry));
|
||||
system.Memory().ReadBlock(process, relocation_address, &relocation,
|
||||
sizeof(RelocationEntry));
|
||||
|
||||
VAddr relocation_target = SegmentTagToAddress(relocation.target_position);
|
||||
if (relocation_target == 0) {
|
||||
|
@ -142,9 +143,9 @@ ResultCode CROHelper::ApplyRelocationBatch(VAddr batch, u32 symbol_address, bool
|
|||
}
|
||||
|
||||
RelocationEntry relocation;
|
||||
memory.ReadBlock(process, batch, &relocation, sizeof(RelocationEntry));
|
||||
system.Memory().ReadBlock(process, batch, &relocation, sizeof(RelocationEntry));
|
||||
relocation.is_batch_resolved = reset ? 0 : 1;
|
||||
memory.WriteBlock(process, batch, &relocation, sizeof(RelocationEntry));
|
||||
system.Memory().WriteBlock(process, batch, &relocation, sizeof(RelocationEntry));
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -154,13 +155,13 @@ VAddr CROHelper::FindExportNamedSymbol(const std::string& name) const {
|
|||
|
||||
std::size_t len = name.size();
|
||||
ExportTreeEntry entry;
|
||||
GetEntry(memory, 0, entry);
|
||||
GetEntry(system.Memory(), 0, entry);
|
||||
ExportTreeEntry::Child next;
|
||||
next.raw = entry.left.raw;
|
||||
u32 found_id;
|
||||
|
||||
while (true) {
|
||||
GetEntry(memory, next.next_index, entry);
|
||||
GetEntry(system.Memory(), next.next_index, entry);
|
||||
|
||||
if (next.is_end) {
|
||||
found_id = entry.export_table_index;
|
||||
|
@ -186,9 +187,9 @@ VAddr CROHelper::FindExportNamedSymbol(const std::string& name) const {
|
|||
|
||||
u32 export_strings_size = GetField(ExportStringsSize);
|
||||
ExportNamedSymbolEntry symbol_entry;
|
||||
GetEntry(memory, found_id, symbol_entry);
|
||||
GetEntry(system.Memory(), found_id, symbol_entry);
|
||||
|
||||
if (memory.ReadCString(symbol_entry.name_offset, export_strings_size) != name)
|
||||
if (system.Memory().ReadCString(symbol_entry.name_offset, export_strings_size) != name)
|
||||
return 0;
|
||||
|
||||
return SegmentTagToAddress(symbol_entry.symbol_position);
|
||||
|
@ -279,7 +280,7 @@ ResultVal<VAddr> CROHelper::RebaseSegmentTable(u32 cro_size, VAddr data_segment_
|
|||
u32 segment_num = GetField(SegmentNum);
|
||||
for (u32 i = 0; i < segment_num; ++i) {
|
||||
SegmentEntry segment;
|
||||
GetEntry(memory, i, segment);
|
||||
GetEntry(system.Memory(), i, segment);
|
||||
if (segment.type == SegmentType::Data) {
|
||||
if (segment.size != 0) {
|
||||
if (segment.size > data_segment_size)
|
||||
|
@ -298,7 +299,7 @@ ResultVal<VAddr> CROHelper::RebaseSegmentTable(u32 cro_size, VAddr data_segment_
|
|||
if (segment.offset > module_address + cro_size)
|
||||
return CROFormatError(0x19);
|
||||
}
|
||||
SetEntry(memory, i, segment);
|
||||
SetEntry(system.Memory(), i, segment);
|
||||
}
|
||||
return MakeResult<u32>(prev_data_segment + module_address);
|
||||
}
|
||||
|
@ -310,7 +311,7 @@ ResultCode CROHelper::RebaseExportNamedSymbolTable() {
|
|||
u32 export_named_symbol_num = GetField(ExportNamedSymbolNum);
|
||||
for (u32 i = 0; i < export_named_symbol_num; ++i) {
|
||||
ExportNamedSymbolEntry entry;
|
||||
GetEntry(memory, i, entry);
|
||||
GetEntry(system.Memory(), i, entry);
|
||||
|
||||
if (entry.name_offset != 0) {
|
||||
entry.name_offset += module_address;
|
||||
|
@ -320,7 +321,7 @@ ResultCode CROHelper::RebaseExportNamedSymbolTable() {
|
|||
}
|
||||
}
|
||||
|
||||
SetEntry(memory, i, entry);
|
||||
SetEntry(system.Memory(), i, entry);
|
||||
}
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
@ -329,7 +330,7 @@ ResultCode CROHelper::VerifyExportTreeTable() const {
|
|||
u32 tree_num = GetField(ExportTreeNum);
|
||||
for (u32 i = 0; i < tree_num; ++i) {
|
||||
ExportTreeEntry entry;
|
||||
GetEntry(memory, i, entry);
|
||||
GetEntry(system.Memory(), i, entry);
|
||||
|
||||
if (entry.left.next_index >= tree_num || entry.right.next_index >= tree_num) {
|
||||
return CROFormatError(0x11);
|
||||
|
@ -353,7 +354,7 @@ ResultCode CROHelper::RebaseImportModuleTable() {
|
|||
u32 module_num = GetField(ImportModuleNum);
|
||||
for (u32 i = 0; i < module_num; ++i) {
|
||||
ImportModuleEntry entry;
|
||||
GetEntry(memory, i, entry);
|
||||
GetEntry(system.Memory(), i, entry);
|
||||
|
||||
if (entry.name_offset != 0) {
|
||||
entry.name_offset += module_address;
|
||||
|
@ -379,7 +380,7 @@ ResultCode CROHelper::RebaseImportModuleTable() {
|
|||
}
|
||||
}
|
||||
|
||||
SetEntry(memory, i, entry);
|
||||
SetEntry(system.Memory(), i, entry);
|
||||
}
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
@ -395,7 +396,7 @@ ResultCode CROHelper::RebaseImportNamedSymbolTable() {
|
|||
u32 num = GetField(ImportNamedSymbolNum);
|
||||
for (u32 i = 0; i < num; ++i) {
|
||||
ImportNamedSymbolEntry entry;
|
||||
GetEntry(memory, i, entry);
|
||||
GetEntry(system.Memory(), i, entry);
|
||||
|
||||
if (entry.name_offset != 0) {
|
||||
entry.name_offset += module_address;
|
||||
|
@ -413,7 +414,7 @@ ResultCode CROHelper::RebaseImportNamedSymbolTable() {
|
|||
}
|
||||
}
|
||||
|
||||
SetEntry(memory, i, entry);
|
||||
SetEntry(system.Memory(), i, entry);
|
||||
}
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
@ -427,7 +428,7 @@ ResultCode CROHelper::RebaseImportIndexedSymbolTable() {
|
|||
u32 num = GetField(ImportIndexedSymbolNum);
|
||||
for (u32 i = 0; i < num; ++i) {
|
||||
ImportIndexedSymbolEntry entry;
|
||||
GetEntry(memory, i, entry);
|
||||
GetEntry(system.Memory(), i, entry);
|
||||
|
||||
if (entry.relocation_batch_offset != 0) {
|
||||
entry.relocation_batch_offset += module_address;
|
||||
|
@ -437,7 +438,7 @@ ResultCode CROHelper::RebaseImportIndexedSymbolTable() {
|
|||
}
|
||||
}
|
||||
|
||||
SetEntry(memory, i, entry);
|
||||
SetEntry(system.Memory(), i, entry);
|
||||
}
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
@ -451,7 +452,7 @@ ResultCode CROHelper::RebaseImportAnonymousSymbolTable() {
|
|||
u32 num = GetField(ImportAnonymousSymbolNum);
|
||||
for (u32 i = 0; i < num; ++i) {
|
||||
ImportAnonymousSymbolEntry entry;
|
||||
GetEntry(memory, i, entry);
|
||||
GetEntry(system.Memory(), i, entry);
|
||||
|
||||
if (entry.relocation_batch_offset != 0) {
|
||||
entry.relocation_batch_offset += module_address;
|
||||
|
@ -461,7 +462,7 @@ ResultCode CROHelper::RebaseImportAnonymousSymbolTable() {
|
|||
}
|
||||
}
|
||||
|
||||
SetEntry(memory, i, entry);
|
||||
SetEntry(system.Memory(), i, entry);
|
||||
}
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
@ -476,14 +477,14 @@ ResultCode CROHelper::ResetExternalRelocations() {
|
|||
ExternalRelocationEntry relocation;
|
||||
|
||||
// Verifies that the last relocation is the end of a batch
|
||||
GetEntry(memory, external_relocation_num - 1, relocation);
|
||||
GetEntry(system.Memory(), external_relocation_num - 1, relocation);
|
||||
if (!relocation.is_batch_end) {
|
||||
return CROFormatError(0x12);
|
||||
}
|
||||
|
||||
bool batch_begin = true;
|
||||
for (u32 i = 0; i < external_relocation_num; ++i) {
|
||||
GetEntry(memory, i, relocation);
|
||||
GetEntry(system.Memory(), i, relocation);
|
||||
VAddr relocation_target = SegmentTagToAddress(relocation.target_position);
|
||||
|
||||
if (relocation_target == 0) {
|
||||
|
@ -500,7 +501,7 @@ ResultCode CROHelper::ResetExternalRelocations() {
|
|||
if (batch_begin) {
|
||||
// resets to unresolved state
|
||||
relocation.is_batch_resolved = 0;
|
||||
SetEntry(memory, i, relocation);
|
||||
SetEntry(system.Memory(), i, relocation);
|
||||
}
|
||||
|
||||
// if current is an end, then the next is a beginning
|
||||
|
@ -516,7 +517,7 @@ ResultCode CROHelper::ClearExternalRelocations() {
|
|||
|
||||
bool batch_begin = true;
|
||||
for (u32 i = 0; i < external_relocation_num; ++i) {
|
||||
GetEntry(memory, i, relocation);
|
||||
GetEntry(system.Memory(), i, relocation);
|
||||
VAddr relocation_target = SegmentTagToAddress(relocation.target_position);
|
||||
|
||||
if (relocation_target == 0) {
|
||||
|
@ -532,7 +533,7 @@ ResultCode CROHelper::ClearExternalRelocations() {
|
|||
if (batch_begin) {
|
||||
// resets to unresolved state
|
||||
relocation.is_batch_resolved = 0;
|
||||
SetEntry(memory, i, relocation);
|
||||
SetEntry(system.Memory(), i, relocation);
|
||||
}
|
||||
|
||||
// if current is an end, then the next is a beginning
|
||||
|
@ -548,13 +549,13 @@ ResultCode CROHelper::ApplyStaticAnonymousSymbolToCRS(VAddr crs_address) {
|
|||
static_relocation_table_offset +
|
||||
GetField(StaticRelocationNum) * sizeof(StaticRelocationEntry);
|
||||
|
||||
CROHelper crs(crs_address, process, memory, cpu);
|
||||
CROHelper crs(crs_address, process, system);
|
||||
u32 offset_export_num = GetField(StaticAnonymousSymbolNum);
|
||||
LOG_INFO(Service_LDR, "CRO \"{}\" exports {} static anonymous symbols", ModuleName(),
|
||||
offset_export_num);
|
||||
for (u32 i = 0; i < offset_export_num; ++i) {
|
||||
StaticAnonymousSymbolEntry entry;
|
||||
GetEntry(memory, i, entry);
|
||||
GetEntry(system.Memory(), i, entry);
|
||||
u32 batch_address = entry.relocation_batch_offset + module_address;
|
||||
|
||||
if (batch_address < static_relocation_table_offset ||
|
||||
|
@ -579,7 +580,7 @@ ResultCode CROHelper::ApplyInternalRelocations(u32 old_data_segment_address) {
|
|||
u32 internal_relocation_num = GetField(InternalRelocationNum);
|
||||
for (u32 i = 0; i < internal_relocation_num; ++i) {
|
||||
InternalRelocationEntry relocation;
|
||||
GetEntry(memory, i, relocation);
|
||||
GetEntry(system.Memory(), i, relocation);
|
||||
VAddr target_addressB = SegmentTagToAddress(relocation.target_position);
|
||||
if (target_addressB == 0) {
|
||||
return CROFormatError(0x15);
|
||||
|
@ -587,7 +588,7 @@ ResultCode CROHelper::ApplyInternalRelocations(u32 old_data_segment_address) {
|
|||
|
||||
VAddr target_address;
|
||||
SegmentEntry target_segment;
|
||||
GetEntry(memory, relocation.target_position.segment_index, target_segment);
|
||||
GetEntry(system.Memory(), relocation.target_position.segment_index, target_segment);
|
||||
|
||||
if (target_segment.type == SegmentType::Data) {
|
||||
// If the relocation is to the .data segment, we need to relocate it in the old buffer
|
||||
|
@ -602,7 +603,7 @@ ResultCode CROHelper::ApplyInternalRelocations(u32 old_data_segment_address) {
|
|||
}
|
||||
|
||||
SegmentEntry symbol_segment;
|
||||
GetEntry(memory, relocation.symbol_segment, symbol_segment);
|
||||
GetEntry(system.Memory(), relocation.symbol_segment, symbol_segment);
|
||||
LOG_TRACE(Service_LDR, "Internally relocates 0x{:08X} with 0x{:08X}", target_address,
|
||||
symbol_segment.offset);
|
||||
ResultCode result = ApplyRelocation(target_address, relocation.type, relocation.addend,
|
||||
|
@ -619,7 +620,7 @@ ResultCode CROHelper::ClearInternalRelocations() {
|
|||
u32 internal_relocation_num = GetField(InternalRelocationNum);
|
||||
for (u32 i = 0; i < internal_relocation_num; ++i) {
|
||||
InternalRelocationEntry relocation;
|
||||
GetEntry(memory, i, relocation);
|
||||
GetEntry(system.Memory(), i, relocation);
|
||||
VAddr target_address = SegmentTagToAddress(relocation.target_position);
|
||||
|
||||
if (target_address == 0) {
|
||||
|
@ -639,13 +640,13 @@ void CROHelper::UnrebaseImportAnonymousSymbolTable() {
|
|||
u32 num = GetField(ImportAnonymousSymbolNum);
|
||||
for (u32 i = 0; i < num; ++i) {
|
||||
ImportAnonymousSymbolEntry entry;
|
||||
GetEntry(memory, i, entry);
|
||||
GetEntry(system.Memory(), i, entry);
|
||||
|
||||
if (entry.relocation_batch_offset != 0) {
|
||||
entry.relocation_batch_offset -= module_address;
|
||||
}
|
||||
|
||||
SetEntry(memory, i, entry);
|
||||
SetEntry(system.Memory(), i, entry);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -653,13 +654,13 @@ void CROHelper::UnrebaseImportIndexedSymbolTable() {
|
|||
u32 num = GetField(ImportIndexedSymbolNum);
|
||||
for (u32 i = 0; i < num; ++i) {
|
||||
ImportIndexedSymbolEntry entry;
|
||||
GetEntry(memory, i, entry);
|
||||
GetEntry(system.Memory(), i, entry);
|
||||
|
||||
if (entry.relocation_batch_offset != 0) {
|
||||
entry.relocation_batch_offset -= module_address;
|
||||
}
|
||||
|
||||
SetEntry(memory, i, entry);
|
||||
SetEntry(system.Memory(), i, entry);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -667,7 +668,7 @@ void CROHelper::UnrebaseImportNamedSymbolTable() {
|
|||
u32 num = GetField(ImportNamedSymbolNum);
|
||||
for (u32 i = 0; i < num; ++i) {
|
||||
ImportNamedSymbolEntry entry;
|
||||
GetEntry(memory, i, entry);
|
||||
GetEntry(system.Memory(), i, entry);
|
||||
|
||||
if (entry.name_offset != 0) {
|
||||
entry.name_offset -= module_address;
|
||||
|
@ -677,7 +678,7 @@ void CROHelper::UnrebaseImportNamedSymbolTable() {
|
|||
entry.relocation_batch_offset -= module_address;
|
||||
}
|
||||
|
||||
SetEntry(memory, i, entry);
|
||||
SetEntry(system.Memory(), i, entry);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -685,7 +686,7 @@ void CROHelper::UnrebaseImportModuleTable() {
|
|||
u32 module_num = GetField(ImportModuleNum);
|
||||
for (u32 i = 0; i < module_num; ++i) {
|
||||
ImportModuleEntry entry;
|
||||
GetEntry(memory, i, entry);
|
||||
GetEntry(system.Memory(), i, entry);
|
||||
|
||||
if (entry.name_offset != 0) {
|
||||
entry.name_offset -= module_address;
|
||||
|
@ -699,7 +700,7 @@ void CROHelper::UnrebaseImportModuleTable() {
|
|||
entry.import_anonymous_symbol_table_offset -= module_address;
|
||||
}
|
||||
|
||||
SetEntry(memory, i, entry);
|
||||
SetEntry(system.Memory(), i, entry);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -707,13 +708,13 @@ void CROHelper::UnrebaseExportNamedSymbolTable() {
|
|||
u32 export_named_symbol_num = GetField(ExportNamedSymbolNum);
|
||||
for (u32 i = 0; i < export_named_symbol_num; ++i) {
|
||||
ExportNamedSymbolEntry entry;
|
||||
GetEntry(memory, i, entry);
|
||||
GetEntry(system.Memory(), i, entry);
|
||||
|
||||
if (entry.name_offset != 0) {
|
||||
entry.name_offset -= module_address;
|
||||
}
|
||||
|
||||
SetEntry(memory, i, entry);
|
||||
SetEntry(system.Memory(), i, entry);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -721,7 +722,7 @@ void CROHelper::UnrebaseSegmentTable() {
|
|||
u32 segment_num = GetField(SegmentNum);
|
||||
for (u32 i = 0; i < segment_num; ++i) {
|
||||
SegmentEntry segment;
|
||||
GetEntry(memory, i, segment);
|
||||
GetEntry(system.Memory(), i, segment);
|
||||
|
||||
if (segment.type == SegmentType::BSS) {
|
||||
segment.offset = 0;
|
||||
|
@ -729,7 +730,7 @@ void CROHelper::UnrebaseSegmentTable() {
|
|||
segment.offset -= module_address;
|
||||
}
|
||||
|
||||
SetEntry(memory, i, segment);
|
||||
SetEntry(system.Memory(), i, segment);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -751,17 +752,17 @@ ResultCode CROHelper::ApplyImportNamedSymbol(VAddr crs_address) {
|
|||
u32 symbol_import_num = GetField(ImportNamedSymbolNum);
|
||||
for (u32 i = 0; i < symbol_import_num; ++i) {
|
||||
ImportNamedSymbolEntry entry;
|
||||
GetEntry(memory, i, entry);
|
||||
GetEntry(system.Memory(), i, entry);
|
||||
VAddr relocation_addr = entry.relocation_batch_offset;
|
||||
ExternalRelocationEntry relocation_entry;
|
||||
memory.ReadBlock(process, relocation_addr, &relocation_entry,
|
||||
system.Memory().ReadBlock(process, relocation_addr, &relocation_entry,
|
||||
sizeof(ExternalRelocationEntry));
|
||||
|
||||
if (!relocation_entry.is_batch_resolved) {
|
||||
ResultCode result = ForEachAutoLinkCRO(
|
||||
process, memory, cpu, crs_address, [&](CROHelper source) -> ResultVal<bool> {
|
||||
process, system, crs_address, [&](CROHelper source) -> ResultVal<bool> {
|
||||
std::string symbol_name =
|
||||
memory.ReadCString(entry.name_offset, import_strings_size);
|
||||
system.Memory().ReadCString(entry.name_offset, import_strings_size);
|
||||
u32 symbol_address = source.FindExportNamedSymbol(symbol_name);
|
||||
|
||||
if (symbol_address != 0) {
|
||||
|
@ -794,10 +795,10 @@ ResultCode CROHelper::ResetImportNamedSymbol() {
|
|||
u32 symbol_import_num = GetField(ImportNamedSymbolNum);
|
||||
for (u32 i = 0; i < symbol_import_num; ++i) {
|
||||
ImportNamedSymbolEntry entry;
|
||||
GetEntry(memory, i, entry);
|
||||
GetEntry(system.Memory(), i, entry);
|
||||
VAddr relocation_addr = entry.relocation_batch_offset;
|
||||
ExternalRelocationEntry relocation_entry;
|
||||
memory.ReadBlock(process, relocation_addr, &relocation_entry,
|
||||
system.Memory().ReadBlock(process, relocation_addr, &relocation_entry,
|
||||
sizeof(ExternalRelocationEntry));
|
||||
|
||||
ResultCode result = ApplyRelocationBatch(relocation_addr, unresolved_symbol, true);
|
||||
|
@ -815,10 +816,10 @@ ResultCode CROHelper::ResetImportIndexedSymbol() {
|
|||
u32 import_num = GetField(ImportIndexedSymbolNum);
|
||||
for (u32 i = 0; i < import_num; ++i) {
|
||||
ImportIndexedSymbolEntry entry;
|
||||
GetEntry(memory, i, entry);
|
||||
GetEntry(system.Memory(), i, entry);
|
||||
VAddr relocation_addr = entry.relocation_batch_offset;
|
||||
ExternalRelocationEntry relocation_entry;
|
||||
memory.ReadBlock(process, relocation_addr, &relocation_entry,
|
||||
system.Memory().ReadBlock(process, relocation_addr, &relocation_entry,
|
||||
sizeof(ExternalRelocationEntry));
|
||||
|
||||
ResultCode result = ApplyRelocationBatch(relocation_addr, unresolved_symbol, true);
|
||||
|
@ -836,10 +837,10 @@ ResultCode CROHelper::ResetImportAnonymousSymbol() {
|
|||
u32 import_num = GetField(ImportAnonymousSymbolNum);
|
||||
for (u32 i = 0; i < import_num; ++i) {
|
||||
ImportAnonymousSymbolEntry entry;
|
||||
GetEntry(memory, i, entry);
|
||||
GetEntry(system.Memory(), i, entry);
|
||||
VAddr relocation_addr = entry.relocation_batch_offset;
|
||||
ExternalRelocationEntry relocation_entry;
|
||||
memory.ReadBlock(process, relocation_addr, &relocation_entry,
|
||||
system.Memory().ReadBlock(process, relocation_addr, &relocation_entry,
|
||||
sizeof(ExternalRelocationEntry));
|
||||
|
||||
ResultCode result = ApplyRelocationBatch(relocation_addr, unresolved_symbol, true);
|
||||
|
@ -857,19 +858,20 @@ ResultCode CROHelper::ApplyModuleImport(VAddr crs_address) {
|
|||
u32 import_module_num = GetField(ImportModuleNum);
|
||||
for (u32 i = 0; i < import_module_num; ++i) {
|
||||
ImportModuleEntry entry;
|
||||
GetEntry(memory, i, entry);
|
||||
std::string want_cro_name = memory.ReadCString(entry.name_offset, import_strings_size);
|
||||
GetEntry(system.Memory(), i, entry);
|
||||
std::string want_cro_name =
|
||||
system.Memory().ReadCString(entry.name_offset, import_strings_size);
|
||||
|
||||
ResultCode result = ForEachAutoLinkCRO(
|
||||
process, memory, cpu, crs_address, [&](CROHelper source) -> ResultVal<bool> {
|
||||
process, system, crs_address, [&](CROHelper source) -> ResultVal<bool> {
|
||||
if (want_cro_name == source.ModuleName()) {
|
||||
LOG_INFO(Service_LDR, "CRO \"{}\" imports {} indexed symbols from \"{}\"",
|
||||
ModuleName(), entry.import_indexed_symbol_num, source.ModuleName());
|
||||
for (u32 j = 0; j < entry.import_indexed_symbol_num; ++j) {
|
||||
ImportIndexedSymbolEntry im;
|
||||
entry.GetImportIndexedSymbolEntry(process, memory, j, im);
|
||||
entry.GetImportIndexedSymbolEntry(process, system.Memory(), j, im);
|
||||
ExportIndexedSymbolEntry ex;
|
||||
source.GetEntry(memory, im.index, ex);
|
||||
source.GetEntry(system.Memory(), im.index, ex);
|
||||
u32 symbol_address = source.SegmentTagToAddress(ex.symbol_position);
|
||||
LOG_TRACE(Service_LDR, " Imports 0x{:08X}", symbol_address);
|
||||
ResultCode result =
|
||||
|
@ -884,7 +886,7 @@ ResultCode CROHelper::ApplyModuleImport(VAddr crs_address) {
|
|||
ModuleName(), entry.import_anonymous_symbol_num, source.ModuleName());
|
||||
for (u32 j = 0; j < entry.import_anonymous_symbol_num; ++j) {
|
||||
ImportAnonymousSymbolEntry im;
|
||||
entry.GetImportAnonymousSymbolEntry(process, memory, j, im);
|
||||
entry.GetImportAnonymousSymbolEntry(process, system.Memory(), j, im);
|
||||
u32 symbol_address = source.SegmentTagToAddress(im.symbol_position);
|
||||
LOG_TRACE(Service_LDR, " Imports 0x{:08X}", symbol_address);
|
||||
ResultCode result =
|
||||
|
@ -913,15 +915,15 @@ ResultCode CROHelper::ApplyExportNamedSymbol(CROHelper target) {
|
|||
u32 target_symbol_import_num = target.GetField(ImportNamedSymbolNum);
|
||||
for (u32 i = 0; i < target_symbol_import_num; ++i) {
|
||||
ImportNamedSymbolEntry entry;
|
||||
target.GetEntry(memory, i, entry);
|
||||
target.GetEntry(system.Memory(), i, entry);
|
||||
VAddr relocation_addr = entry.relocation_batch_offset;
|
||||
ExternalRelocationEntry relocation_entry;
|
||||
memory.ReadBlock(process, relocation_addr, &relocation_entry,
|
||||
system.Memory().ReadBlock(process, relocation_addr, &relocation_entry,
|
||||
sizeof(ExternalRelocationEntry));
|
||||
|
||||
if (!relocation_entry.is_batch_resolved) {
|
||||
std::string symbol_name =
|
||||
memory.ReadCString(entry.name_offset, target_import_strings_size);
|
||||
system.Memory().ReadCString(entry.name_offset, target_import_strings_size);
|
||||
u32 symbol_address = FindExportNamedSymbol(symbol_name);
|
||||
if (symbol_address != 0) {
|
||||
LOG_TRACE(Service_LDR, " exports symbol \"{}\"", symbol_name);
|
||||
|
@ -944,15 +946,15 @@ ResultCode CROHelper::ResetExportNamedSymbol(CROHelper target) {
|
|||
u32 target_symbol_import_num = target.GetField(ImportNamedSymbolNum);
|
||||
for (u32 i = 0; i < target_symbol_import_num; ++i) {
|
||||
ImportNamedSymbolEntry entry;
|
||||
target.GetEntry(memory, i, entry);
|
||||
target.GetEntry(system.Memory(), i, entry);
|
||||
VAddr relocation_addr = entry.relocation_batch_offset;
|
||||
ExternalRelocationEntry relocation_entry;
|
||||
memory.ReadBlock(process, relocation_addr, &relocation_entry,
|
||||
system.Memory().ReadBlock(process, relocation_addr, &relocation_entry,
|
||||
sizeof(ExternalRelocationEntry));
|
||||
|
||||
if (relocation_entry.is_batch_resolved) {
|
||||
std::string symbol_name =
|
||||
memory.ReadCString(entry.name_offset, target_import_strings_size);
|
||||
system.Memory().ReadCString(entry.name_offset, target_import_strings_size);
|
||||
u32 symbol_address = FindExportNamedSymbol(symbol_name);
|
||||
if (symbol_address != 0) {
|
||||
LOG_TRACE(Service_LDR, " unexports symbol \"{}\"", symbol_name);
|
||||
|
@ -974,18 +976,19 @@ ResultCode CROHelper::ApplyModuleExport(CROHelper target) {
|
|||
u32 target_import_module_num = target.GetField(ImportModuleNum);
|
||||
for (u32 i = 0; i < target_import_module_num; ++i) {
|
||||
ImportModuleEntry entry;
|
||||
target.GetEntry(memory, i, entry);
|
||||
target.GetEntry(system.Memory(), i, entry);
|
||||
|
||||
if (memory.ReadCString(entry.name_offset, target_import_string_size) != module_name)
|
||||
if (system.Memory().ReadCString(entry.name_offset, target_import_string_size) !=
|
||||
module_name)
|
||||
continue;
|
||||
|
||||
LOG_INFO(Service_LDR, "CRO \"{}\" exports {} indexed symbols to \"{}\"", module_name,
|
||||
entry.import_indexed_symbol_num, target.ModuleName());
|
||||
for (u32 j = 0; j < entry.import_indexed_symbol_num; ++j) {
|
||||
ImportIndexedSymbolEntry im;
|
||||
entry.GetImportIndexedSymbolEntry(process, memory, j, im);
|
||||
entry.GetImportIndexedSymbolEntry(process, system.Memory(), j, im);
|
||||
ExportIndexedSymbolEntry ex;
|
||||
GetEntry(memory, im.index, ex);
|
||||
GetEntry(system.Memory(), im.index, ex);
|
||||
u32 symbol_address = SegmentTagToAddress(ex.symbol_position);
|
||||
LOG_TRACE(Service_LDR, " exports symbol 0x{:08X}", symbol_address);
|
||||
ResultCode result =
|
||||
|
@ -1000,7 +1003,7 @@ ResultCode CROHelper::ApplyModuleExport(CROHelper target) {
|
|||
entry.import_anonymous_symbol_num, target.ModuleName());
|
||||
for (u32 j = 0; j < entry.import_anonymous_symbol_num; ++j) {
|
||||
ImportAnonymousSymbolEntry im;
|
||||
entry.GetImportAnonymousSymbolEntry(process, memory, j, im);
|
||||
entry.GetImportAnonymousSymbolEntry(process, system.Memory(), j, im);
|
||||
u32 symbol_address = SegmentTagToAddress(im.symbol_position);
|
||||
LOG_TRACE(Service_LDR, " exports symbol 0x{:08X}", symbol_address);
|
||||
ResultCode result =
|
||||
|
@ -1023,16 +1026,17 @@ ResultCode CROHelper::ResetModuleExport(CROHelper target) {
|
|||
u32 target_import_module_num = target.GetField(ImportModuleNum);
|
||||
for (u32 i = 0; i < target_import_module_num; ++i) {
|
||||
ImportModuleEntry entry;
|
||||
target.GetEntry(memory, i, entry);
|
||||
target.GetEntry(system.Memory(), i, entry);
|
||||
|
||||
if (memory.ReadCString(entry.name_offset, target_import_string_size) != module_name)
|
||||
if (system.Memory().ReadCString(entry.name_offset, target_import_string_size) !=
|
||||
module_name)
|
||||
continue;
|
||||
|
||||
LOG_DEBUG(Service_LDR, "CRO \"{}\" unexports indexed symbols to \"{}\"", module_name,
|
||||
target.ModuleName());
|
||||
for (u32 j = 0; j < entry.import_indexed_symbol_num; ++j) {
|
||||
ImportIndexedSymbolEntry im;
|
||||
entry.GetImportIndexedSymbolEntry(process, memory, j, im);
|
||||
entry.GetImportIndexedSymbolEntry(process, system.Memory(), j, im);
|
||||
ResultCode result =
|
||||
target.ApplyRelocationBatch(im.relocation_batch_offset, unresolved_symbol, true);
|
||||
if (result.IsError()) {
|
||||
|
@ -1045,7 +1049,7 @@ ResultCode CROHelper::ResetModuleExport(CROHelper target) {
|
|||
target.ModuleName());
|
||||
for (u32 j = 0; j < entry.import_anonymous_symbol_num; ++j) {
|
||||
ImportAnonymousSymbolEntry im;
|
||||
entry.GetImportAnonymousSymbolEntry(process, memory, j, im);
|
||||
entry.GetImportAnonymousSymbolEntry(process, system.Memory(), j, im);
|
||||
ResultCode result =
|
||||
target.ApplyRelocationBatch(im.relocation_batch_offset, unresolved_symbol, true);
|
||||
if (result.IsError()) {
|
||||
|
@ -1063,15 +1067,16 @@ ResultCode CROHelper::ApplyExitRelocations(VAddr crs_address) {
|
|||
u32 symbol_import_num = GetField(ImportNamedSymbolNum);
|
||||
for (u32 i = 0; i < symbol_import_num; ++i) {
|
||||
ImportNamedSymbolEntry entry;
|
||||
GetEntry(memory, i, entry);
|
||||
GetEntry(system.Memory(), i, entry);
|
||||
VAddr relocation_addr = entry.relocation_batch_offset;
|
||||
ExternalRelocationEntry relocation_entry;
|
||||
memory.ReadBlock(process, relocation_addr, &relocation_entry,
|
||||
system.Memory().ReadBlock(process, relocation_addr, &relocation_entry,
|
||||
sizeof(ExternalRelocationEntry));
|
||||
|
||||
if (memory.ReadCString(entry.name_offset, import_strings_size) == "__aeabi_atexit") {
|
||||
if (system.Memory().ReadCString(entry.name_offset, import_strings_size) ==
|
||||
"__aeabi_atexit") {
|
||||
ResultCode result = ForEachAutoLinkCRO(
|
||||
process, memory, cpu, crs_address, [&](CROHelper source) -> ResultVal<bool> {
|
||||
process, system, crs_address, [&](CROHelper source) -> ResultVal<bool> {
|
||||
u32 symbol_address = source.FindExportNamedSymbol("nnroAeabiAtexit_");
|
||||
|
||||
if (symbol_address != 0) {
|
||||
|
@ -1126,7 +1131,8 @@ ResultCode CROHelper::Rebase(VAddr crs_address, u32 cro_size, VAddr data_segment
|
|||
return result;
|
||||
}
|
||||
|
||||
result = VerifyStringTableLength(memory, GetField(ModuleNameOffset), GetField(ModuleNameSize));
|
||||
result = VerifyStringTableLength(system.Memory(), GetField(ModuleNameOffset),
|
||||
GetField(ModuleNameSize));
|
||||
if (result.IsError()) {
|
||||
LOG_ERROR(Service_LDR, "Error verifying module name {:08X}", result.raw);
|
||||
return result;
|
||||
|
@ -1155,8 +1161,8 @@ ResultCode CROHelper::Rebase(VAddr crs_address, u32 cro_size, VAddr data_segment
|
|||
return result;
|
||||
}
|
||||
|
||||
result =
|
||||
VerifyStringTableLength(memory, GetField(ExportStringsOffset), GetField(ExportStringsSize));
|
||||
result = VerifyStringTableLength(system.Memory(), GetField(ExportStringsOffset),
|
||||
GetField(ExportStringsSize));
|
||||
if (result.IsError()) {
|
||||
LOG_ERROR(Service_LDR, "Error verifying export strings {:08X}", result.raw);
|
||||
return result;
|
||||
|
@ -1192,8 +1198,8 @@ ResultCode CROHelper::Rebase(VAddr crs_address, u32 cro_size, VAddr data_segment
|
|||
return result;
|
||||
}
|
||||
|
||||
result =
|
||||
VerifyStringTableLength(memory, GetField(ImportStringsOffset), GetField(ImportStringsSize));
|
||||
result = VerifyStringTableLength(system.Memory(), GetField(ImportStringsOffset),
|
||||
GetField(ImportStringsSize));
|
||||
if (result.IsError()) {
|
||||
LOG_ERROR(Service_LDR, "Error verifying import strings {:08X}", result.raw);
|
||||
return result;
|
||||
|
@ -1266,11 +1272,11 @@ ResultCode CROHelper::Link(VAddr crs_address, bool link_on_load_bug_fix) {
|
|||
// so we do the same
|
||||
if (GetField(SegmentNum) >= 2) { // means we have .data segment
|
||||
SegmentEntry entry;
|
||||
GetEntry(memory, 2, entry);
|
||||
GetEntry(system.Memory(), 2, entry);
|
||||
ASSERT(entry.type == SegmentType::Data);
|
||||
data_segment_address = entry.offset;
|
||||
entry.offset = GetField(DataOffset);
|
||||
SetEntry(memory, 2, entry);
|
||||
SetEntry(system.Memory(), 2, entry);
|
||||
}
|
||||
}
|
||||
SCOPE_EXIT({
|
||||
|
@ -1278,9 +1284,9 @@ ResultCode CROHelper::Link(VAddr crs_address, bool link_on_load_bug_fix) {
|
|||
if (link_on_load_bug_fix) {
|
||||
if (GetField(SegmentNum) >= 2) {
|
||||
SegmentEntry entry;
|
||||
GetEntry(memory, 2, entry);
|
||||
GetEntry(system.Memory(), 2, entry);
|
||||
entry.offset = data_segment_address;
|
||||
SetEntry(memory, 2, entry);
|
||||
SetEntry(system.Memory(), 2, entry);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
@ -1301,7 +1307,7 @@ ResultCode CROHelper::Link(VAddr crs_address, bool link_on_load_bug_fix) {
|
|||
}
|
||||
|
||||
// Exports symbols to other modules
|
||||
result = ForEachAutoLinkCRO(process, memory, cpu, crs_address,
|
||||
result = ForEachAutoLinkCRO(process, system, crs_address,
|
||||
[this](CROHelper target) -> ResultVal<bool> {
|
||||
ResultCode result = ApplyExportNamedSymbol(target);
|
||||
if (result.IsError())
|
||||
|
@ -1346,7 +1352,7 @@ ResultCode CROHelper::Unlink(VAddr crs_address) {
|
|||
|
||||
// Resets all symbols in other modules imported from this module
|
||||
// Note: the RO service only seems to search in auto-link modules
|
||||
result = ForEachAutoLinkCRO(process, memory, cpu, crs_address,
|
||||
result = ForEachAutoLinkCRO(process, system, crs_address,
|
||||
[this](CROHelper target) -> ResultVal<bool> {
|
||||
ResultCode result = ResetExportNamedSymbol(target);
|
||||
if (result.IsError())
|
||||
|
@ -1387,13 +1393,13 @@ void CROHelper::InitCRS() {
|
|||
}
|
||||
|
||||
void CROHelper::Register(VAddr crs_address, bool auto_link) {
|
||||
CROHelper crs(crs_address, process, memory, cpu);
|
||||
CROHelper head(auto_link ? crs.NextModule() : crs.PreviousModule(), process, memory, cpu);
|
||||
CROHelper crs(crs_address, process, system);
|
||||
CROHelper head(auto_link ? crs.NextModule() : crs.PreviousModule(), process, system);
|
||||
|
||||
if (head.module_address) {
|
||||
// there are already CROs registered
|
||||
// register as the new tail
|
||||
CROHelper tail(head.PreviousModule(), process, memory, cpu);
|
||||
CROHelper tail(head.PreviousModule(), process, system);
|
||||
|
||||
// link with the old tail
|
||||
ASSERT(tail.NextModule() == 0);
|
||||
|
@ -1419,11 +1425,11 @@ void CROHelper::Register(VAddr crs_address, bool auto_link) {
|
|||
}
|
||||
|
||||
void CROHelper::Unregister(VAddr crs_address) {
|
||||
CROHelper crs(crs_address, process, memory, cpu);
|
||||
CROHelper next_head(crs.NextModule(), process, memory, cpu);
|
||||
CROHelper previous_head(crs.PreviousModule(), process, memory, cpu);
|
||||
CROHelper next(NextModule(), process, memory, cpu);
|
||||
CROHelper previous(PreviousModule(), process, memory, cpu);
|
||||
CROHelper crs(crs_address, process, system);
|
||||
CROHelper next_head(crs.NextModule(), process, system);
|
||||
CROHelper previous_head(crs.PreviousModule(), process, system);
|
||||
CROHelper next(NextModule(), process, system);
|
||||
CROHelper previous(PreviousModule(), process, system);
|
||||
|
||||
if (module_address == next_head.module_address ||
|
||||
module_address == previous_head.module_address) {
|
||||
|
@ -1517,7 +1523,7 @@ std::tuple<VAddr, u32> CROHelper::GetExecutablePages() const {
|
|||
u32 segment_num = GetField(SegmentNum);
|
||||
for (u32 i = 0; i < segment_num; ++i) {
|
||||
SegmentEntry entry;
|
||||
GetEntry(memory, i, entry);
|
||||
GetEntry(system.Memory(), i, entry);
|
||||
if (entry.type == SegmentType::Code && entry.size != 0) {
|
||||
VAddr begin = Common::AlignDown(entry.offset, Memory::PAGE_SIZE);
|
||||
VAddr end = Common::AlignUp(entry.offset + entry.size, Memory::PAGE_SIZE);
|
||||
|
|
|
@ -33,12 +33,11 @@ static constexpr u32 CRO_HASH_SIZE = 0x80;
class CROHelper final {
public:
    // TODO (wwylele): pass in the process handle for memory access
    explicit CROHelper(VAddr cro_address, Kernel::Process& process, Memory::MemorySystem& memory,
                       ARM_Interface& cpu)
        : module_address(cro_address), process(process), memory(memory), cpu(cpu) {}
    explicit CROHelper(VAddr cro_address, Kernel::Process& process, Core::System& system)
        : module_address(cro_address), process(process), system(system) {}

    std::string ModuleName() const {
        return memory.ReadCString(GetField(ModuleNameOffset), GetField(ModuleNameSize));
        return system.Memory().ReadCString(GetField(ModuleNameOffset), GetField(ModuleNameSize));
    }

    u32 GetFileSize() const {

@ -144,8 +143,7 @@ public:
private:
    const VAddr module_address; ///< the virtual address of this module
    Kernel::Process& process;   ///< the owner process of this module
    Memory::MemorySystem& memory;
    ARM_Interface& cpu;
    Core::System& system;

    /**
     * Each item in this enum represents a u32 field in the header begin from address+0x80,

@ -403,11 +401,11 @@ private:
    }

    u32 GetField(HeaderField field) const {
        return memory.Read32(Field(field));
        return system.Memory().Read32(Field(field));
    }

    void SetField(HeaderField field, u32 value) {
        memory.Write32(Field(field), value);
        system.Memory().Write32(Field(field), value);
    }

    /**

@ -474,12 +472,11 @@ private:
     * otherwise error code of the last iteration.
     */
    template <typename FunctionObject>
    static ResultCode ForEachAutoLinkCRO(Kernel::Process& process, Memory::MemorySystem& memory,
                                         ARM_Interface& cpu, VAddr crs_address,
                                         FunctionObject func) {
    static ResultCode ForEachAutoLinkCRO(Kernel::Process& process, Core::System& system,
                                         VAddr crs_address, FunctionObject func) {
        VAddr current = crs_address;
        while (current != 0) {
            CROHelper cro(current, process, memory, cpu);
            CROHelper cro(current, process, system);
            CASCADE_RESULT(bool next, func(cro));
            if (!next)
                break;
|
|
|
@ -120,7 +120,7 @@ void RO::Initialize(Kernel::HLERequestContext& ctx) {
|
|||
return;
|
||||
}
|
||||
|
||||
CROHelper crs(crs_address, *process, system.Memory(), system.CPU());
|
||||
CROHelper crs(crs_address, *process, system);
|
||||
crs.InitCRS();
|
||||
|
||||
result = crs.Rebase(0, crs_size, 0, 0, 0, 0, true);
|
||||
|
@ -254,7 +254,7 @@ void RO::LoadCRO(Kernel::HLERequestContext& ctx, bool link_on_load_bug_fix) {
|
|||
return;
|
||||
}
|
||||
|
||||
CROHelper cro(cro_address, *process, system.Memory(), system.CPU());
|
||||
CROHelper cro(cro_address, *process, system);
|
||||
|
||||
result = cro.VerifyHash(cro_size, crr_address);
|
||||
if (result.IsError()) {
|
||||
|
@ -318,7 +318,7 @@ void RO::LoadCRO(Kernel::HLERequestContext& ctx, bool link_on_load_bug_fix) {
|
|||
}
|
||||
}
|
||||
|
||||
system.CPU().InvalidateCacheRange(cro_address, cro_size);
|
||||
system.InvalidateCacheRange(cro_address, cro_size);
|
||||
|
||||
LOG_INFO(Service_LDR, "CRO \"{}\" loaded at 0x{:08X}, fixed_end=0x{:08X}", cro.ModuleName(),
|
||||
cro_address, cro_address + fix_size);
|
||||
|
@ -336,7 +336,7 @@ void RO::UnloadCRO(Kernel::HLERequestContext& ctx) {
|
|||
LOG_DEBUG(Service_LDR, "called, cro_address=0x{:08X}, zero={}, cro_buffer_ptr=0x{:08X}",
|
||||
cro_address, zero, cro_buffer_ptr);
|
||||
|
||||
CROHelper cro(cro_address, *process, system.Memory(), system.CPU());
|
||||
CROHelper cro(cro_address, *process, system);
|
||||
|
||||
IPC::RequestBuilder rb = rp.MakeBuilder(1, 0);
|
||||
|
||||
|
@ -391,7 +391,7 @@ void RO::UnloadCRO(Kernel::HLERequestContext& ctx) {
|
|||
LOG_ERROR(Service_LDR, "Error unmapping CRO {:08X}", result.raw);
|
||||
}
|
||||
|
||||
system.CPU().InvalidateCacheRange(cro_address, fixed_size);
|
||||
system.InvalidateCacheRange(cro_address, fixed_size);
|
||||
|
||||
rb.Push(result);
|
||||
}
|
||||
|
@ -403,7 +403,7 @@ void RO::LinkCRO(Kernel::HLERequestContext& ctx) {
|
|||
|
||||
LOG_DEBUG(Service_LDR, "called, cro_address=0x{:08X}", cro_address);
|
||||
|
||||
CROHelper cro(cro_address, *process, system.Memory(), system.CPU());
|
||||
CROHelper cro(cro_address, *process, system);
|
||||
|
||||
IPC::RequestBuilder rb = rp.MakeBuilder(1, 0);
|
||||
|
||||
|
@ -443,7 +443,7 @@ void RO::UnlinkCRO(Kernel::HLERequestContext& ctx) {
|
|||
|
||||
LOG_DEBUG(Service_LDR, "called, cro_address=0x{:08X}", cro_address);
|
||||
|
||||
CROHelper cro(cro_address, *process, system.Memory(), system.CPU());
|
||||
CROHelper cro(cro_address, *process, system);
|
||||
|
||||
IPC::RequestBuilder rb = rp.MakeBuilder(1, 0);
|
||||
|
||||
|
@ -492,7 +492,7 @@ void RO::Shutdown(Kernel::HLERequestContext& ctx) {
|
|||
return;
|
||||
}
|
||||
|
||||
CROHelper crs(slot->loaded_crs, *process, system.Memory(), system.CPU());
|
||||
CROHelper crs(slot->loaded_crs, *process, system);
|
||||
crs.Unrebase(true);
|
||||
|
||||
ResultCode result = RESULT_SUCCESS;
|
||||
|
|
|
@ -402,8 +402,8 @@ inline void Write(u32 addr, const T data) {
    switch (index) {

    // Memory fills are triggered once the fill value is written.
    case GPU_REG_INDEX_WORKAROUND(memory_fill_config[0].trigger, 0x00004 + 0x3):
    case GPU_REG_INDEX_WORKAROUND(memory_fill_config[1].trigger, 0x00008 + 0x3): {
    case GPU_REG_INDEX(memory_fill_config[0].trigger):
    case GPU_REG_INDEX(memory_fill_config[1].trigger): {
        const bool is_second_filler = (index != GPU_REG_INDEX(memory_fill_config[0].trigger));
        auto& config = g_regs.memory_fill_config[is_second_filler];
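With the MSVC workaround gone, GPU_REG_INDEX can be used directly as a constant expression, even for array members. A stand-alone sketch of the same idiom with invented names (nothing below exists in the codebase; offsetof on array elements is conditionally supported by the standard but accepted by the major compilers):

// Hypothetical sketch only, not part of this commit.
#include <cstddef>
#include <cstdint>

struct ExampleRegs {
    struct Fill {
        std::uint32_t address_start;
        std::uint32_t address_end;
        std::uint32_t value;
        std::uint32_t trigger;
    } fill[2];
};

// Word index of a register field inside the MMIO block.
#define EXAMPLE_REG_INDEX(field) (offsetof(ExampleRegs, field) / sizeof(std::uint32_t))

static_assert(EXAMPLE_REG_INDEX(fill[1].trigger) == 7, "unexpected register layout");

void WriteRegister(std::size_t index, std::uint32_t value) {
    switch (index) {
    case EXAMPLE_REG_INDEX(fill[0].trigger):
    case EXAMPLE_REG_INDEX(fill[1].trigger): {
        const bool second = (index != EXAMPLE_REG_INDEX(fill[0].trigger));
        // A real implementation would start the corresponding memory fill here.
        (void)second;
        (void)value;
        break;
    }
    default:
        break;
    }
}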
@ -22,41 +22,15 @@ namespace GPU {
|
|||
constexpr float SCREEN_REFRESH_RATE = 60;
|
||||
|
||||
// Returns index corresponding to the Regs member labeled by field_name
|
||||
// TODO: Due to Visual studio bug 209229, offsetof does not return constant expressions
|
||||
// when used with array elements (e.g. GPU_REG_INDEX(memory_fill_config[0])).
|
||||
// For details cf.
|
||||
// https://connect.microsoft.com/VisualStudio/feedback/details/209229/offsetof-does-not-produce-a-constant-expression-for-array-members
|
||||
// Hopefully, this will be fixed sometime in the future.
|
||||
// For lack of better alternatives, we currently hardcode the offsets when constant
|
||||
// expressions are needed via GPU_REG_INDEX_WORKAROUND (on sane compilers, static_asserts
|
||||
// will then make sure the offsets indeed match the automatically calculated ones).
|
||||
#define GPU_REG_INDEX(field_name) (offsetof(GPU::Regs, field_name) / sizeof(u32))
|
||||
#if defined(_MSC_VER)
|
||||
#define GPU_REG_INDEX_WORKAROUND(field_name, backup_workaround_index) (backup_workaround_index)
|
||||
#else
|
||||
// NOTE: Yeah, hacking in a static_assert here just to workaround the lacking MSVC compiler
|
||||
// really is this annoying. This macro just forwards its first argument to GPU_REG_INDEX
|
||||
// and then performs a (no-op) cast to std::size_t iff the second argument matches the
|
||||
// expected field offset. Otherwise, the compiler will fail to compile this code.
|
||||
#define GPU_REG_INDEX_WORKAROUND(field_name, backup_workaround_index) \
|
||||
((typename std::enable_if<backup_workaround_index == GPU_REG_INDEX(field_name), \
|
||||
std::size_t>::type) GPU_REG_INDEX(field_name))
|
||||
#endif
|
||||
|
||||
// MMIO region 0x1EFxxxxx
|
||||
struct Regs {
|
||||
|
||||
// helper macro to make sure the defined structures are of the expected size.
|
||||
#if defined(_MSC_VER)
|
||||
// TODO: MSVC does not support using sizeof() on non-static data members even though this
|
||||
// is technically allowed since C++11. This macro should be enabled once MSVC adds
|
||||
// support for that.
|
||||
#define ASSERT_MEMBER_SIZE(name, size_in_bytes)
|
||||
#else
|
||||
#define ASSERT_MEMBER_SIZE(name, size_in_bytes) \
|
||||
static_assert(sizeof(name) == size_in_bytes, \
|
||||
"Structure size and register block length don't match")
|
||||
#endif
|
||||
|
||||
// Components are laid out in reverse byte order, most significant bits first.
|
||||
enum class PixelFormat : u32 {
|
||||
|
@ -307,10 +281,6 @@ private:
|
|||
};
|
||||
static_assert(std::is_standard_layout<Regs>::value, "Structure does not use standard layout");
|
||||
|
||||
// TODO: MSVC does not support using offsetof() on non-static data members even though this
|
||||
// is technically allowed since C++11. This macro should be enabled once MSVC adds
|
||||
// support for that.
|
||||
#ifndef _MSC_VER
|
||||
#define ASSERT_REG_POSITION(field_name, position) \
|
||||
static_assert(offsetof(Regs, field_name) == position * 4, \
|
||||
"Field " #field_name " has invalid position")
|
||||
|
@ -323,7 +293,6 @@ ASSERT_REG_POSITION(display_transfer_config, 0x00300);
|
|||
ASSERT_REG_POSITION(command_processor_config, 0x00638);
|
||||
|
||||
#undef ASSERT_REG_POSITION
|
||||
#endif // !defined(_MSC_VER)
|
||||
|
||||
// The total number of registers is chosen arbitrarily, but let's make sure it's not some odd value
|
||||
// anyway.
|
||||
|
|
|
@ -309,7 +309,7 @@ ResultStatus AppLoader_THREEDSX::ReadRomFS(std::shared_ptr<FileSys::RomFSReader>
|
|||
if (!romfs_file_inner.IsOpen())
|
||||
return ResultStatus::Error;
|
||||
|
||||
romfs_file = std::make_shared<FileSys::RomFSReader>(std::move(romfs_file_inner),
|
||||
romfs_file = std::make_shared<FileSys::DirectRomFSReader>(std::move(romfs_file_inner),
|
||||
romfs_offset, romfs_size);
|
||||
|
||||
return ResultStatus::Success;
|
||||
|
|
|
@ -105,13 +105,22 @@ public:
|
|||
* Loads the system mode that this application needs.
|
||||
* This function defaults to 2 (96MB allocated to the application) if it can't read the
|
||||
* information.
|
||||
* @returns A pair with the optional system mode, and and the status.
|
||||
* @returns A pair with the optional system mode, and the status.
|
||||
*/
|
||||
virtual std::pair<std::optional<u32>, ResultStatus> LoadKernelSystemMode() {
|
||||
// 96MB allocated to the application.
|
||||
return std::make_pair(2, ResultStatus::Success);
|
||||
}
|
||||
|
||||
/**
|
||||
* Loads the N3ds mode that this application uses.
|
||||
* It defaults to 0 (O3DS default) if it can't read the information.
|
||||
* @returns A pair with the optional N3ds mode, and the status.
|
||||
*/
|
||||
virtual std::pair<std::optional<u8>, ResultStatus> LoadKernelN3dsMode() {
|
||||
return std::make_pair(0, ResultStatus::Success);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get whether this application is executable.
|
||||
* @param out_executable Reference to store the executable flag into.
|
||||
|
@ -186,6 +195,15 @@ public:
|
|||
return ResultStatus::ErrorNotImplemented;
|
||||
}
|
||||
|
||||
/**
|
||||
* Dump the RomFS of the application
|
||||
* @param target_path The target path to dump to
|
||||
* @return ResultStatus result of function
|
||||
*/
|
||||
virtual ResultStatus DumpRomFS(const std::string& target_path) {
|
||||
return ResultStatus::ErrorNotImplemented;
|
||||
}
|
||||
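A hypothetical call site for the new dump hook (the loader variable, log message, and target path below are assumptions for illustration, not code from this commit):

// Hypothetical usage sketch; 'app_loader' would come from the frontend
// (e.g. whatever Loader::GetLoader returned for the booted title).
Loader::ResultStatus DumpGameRomFS(Loader::AppLoader& app_loader) {
    const Loader::ResultStatus status = app_loader.DumpRomFS("dump/romfs/");
    if (status != Loader::ResultStatus::Success) {
        LOG_ERROR(Loader, "Failed to dump RomFS");
    }
    return status;
}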
|
||||
/**
|
||||
* Get the update RomFS of the application
|
||||
* Since the RomFS can be huge, we return a file reference instead of copying to a buffer
|
||||
|
@ -196,6 +214,15 @@ public:
|
|||
return ResultStatus::ErrorNotImplemented;
|
||||
}
|
||||
|
||||
/**
|
||||
* Dump the update RomFS of the application
|
||||
* @param target_path The target path to dump to
|
||||
* @return ResultStatus result of function
|
||||
*/
|
||||
virtual ResultStatus DumpUpdateRomFS(const std::string& target_path) {
|
||||
return ResultStatus::ErrorNotImplemented;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the title of the application
|
||||
* @param title Reference to store the application title into
|
||||
|
|
|
@ -61,6 +61,19 @@ std::pair<std::optional<u32>, ResultStatus> AppLoader_NCCH::LoadKernelSystemMode
|
|||
ResultStatus::Success);
|
||||
}
|
||||
|
||||
std::pair<std::optional<u8>, ResultStatus> AppLoader_NCCH::LoadKernelN3dsMode() {
|
||||
if (!is_loaded) {
|
||||
ResultStatus res = base_ncch.Load();
|
||||
if (res != ResultStatus::Success) {
|
||||
return std::make_pair(std::optional<u8>{}, res);
|
||||
}
|
||||
}
|
||||
|
||||
// Set the system mode as the one from the exheader.
|
||||
return std::make_pair(overlay_ncch->exheader_header.arm11_system_local_caps.n3ds_mode,
|
||||
ResultStatus::Success);
|
||||
}
|
||||
|
||||
ResultStatus AppLoader_NCCH::LoadExec(std::shared_ptr<Kernel::Process>& process) {
|
||||
using Kernel::CodeSet;
|
||||
|
||||
|
@ -254,6 +267,18 @@ ResultStatus AppLoader_NCCH::ReadUpdateRomFS(std::shared_ptr<FileSys::RomFSReade
|
|||
return ResultStatus::Success;
|
||||
}
|
||||
|
||||
ResultStatus AppLoader_NCCH::DumpRomFS(const std::string& target_path) {
|
||||
return base_ncch.DumpRomFS(target_path);
|
||||
}
|
||||
|
||||
ResultStatus AppLoader_NCCH::DumpUpdateRomFS(const std::string& target_path) {
|
||||
u64 program_id;
|
||||
ReadProgramId(program_id);
|
||||
update_ncch.OpenFile(
|
||||
Service::AM::GetTitleContentPath(Service::FS::MediaType::SDMC, program_id | UPDATE_MASK));
|
||||
return update_ncch.DumpRomFS(target_path);
|
||||
}
|
||||
|
||||
ResultStatus AppLoader_NCCH::ReadTitle(std::string& title) {
|
||||
std::vector<u8> data;
|
||||
Loader::SMDH smdh;
|
||||
|
|
|
@ -41,6 +41,8 @@ public:
|
|||
*/
|
||||
std::pair<std::optional<u32>, ResultStatus> LoadKernelSystemMode() override;
|
||||
|
||||
std::pair<std::optional<u8>, ResultStatus> LoadKernelN3dsMode() override;
|
||||
|
||||
ResultStatus IsExecutable(bool& out_executable) override;
|
||||
|
||||
ResultStatus ReadCode(std::vector<u8>& buffer) override;
|
||||
|
@ -59,6 +61,10 @@ public:
|
|||
|
||||
ResultStatus ReadUpdateRomFS(std::shared_ptr<FileSys::RomFSReader>& romfs_file) override;
|
||||
|
||||
ResultStatus DumpRomFS(const std::string& target_path) override;
|
||||
|
||||
ResultStatus DumpUpdateRomFS(const std::string& target_path) override;
|
||||
|
||||
ResultStatus ReadTitle(std::string& title) override;
|
||||
|
||||
private:
|
||||
|
|
|
@ -46,7 +46,9 @@ void RPCServer::HandleWriteMemory(Packet& packet, u32 address, const u8* data, u
|
|||
Core::System::GetInstance().Memory().WriteBlock(
|
||||
*Core::System::GetInstance().Kernel().GetCurrentProcess(), address, data, data_size);
|
||||
// If the memory happens to be executable code, make sure the changes become visible
|
||||
Core::CPU().InvalidateCacheRange(address, data_size);
|
||||
|
||||
// Is current core correct here?
|
||||
Core::System::GetInstance().InvalidateCacheRange(address, data_size);
|
||||
}
|
||||
packet.SetPacketDataSize(0);
|
||||
packet.SendReply();
|
||||
|
|
|
@ -14,7 +14,6 @@
|
|||
#include "input_common/udp/client.h"
|
||||
#include "input_common/udp/protocol.h"
|
||||
|
||||
using boost::asio::ip::address_v4;
|
||||
using boost::asio::ip::udp;
|
||||
|
||||
namespace InputCommon::CemuhookUDP {
|
||||
|
@ -31,10 +30,10 @@ public:

    explicit Socket(const std::string& host, u16 port, u8 pad_index, u32 client_id,
                    SocketCallback callback)
        : client_id(client_id), timer(io_service),
          send_endpoint(udp::endpoint(address_v4::from_string(host), port)),
          socket(io_service, udp::endpoint(udp::v4(), 0)), pad_index(pad_index),
          callback(std::move(callback)) {}
        : callback(std::move(callback)), timer(io_service),
          socket(io_service, udp::endpoint(udp::v4(), 0)), client_id(client_id),
          pad_index(pad_index),
          send_endpoint(udp::endpoint(boost::asio::ip::make_address_v4(host), port)) {}

    void Stop() {
        io_service.stop();
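The initializer list is reordered here, presumably to match the members' declaration order (members are always constructed in declaration order, regardless of how the list is written), and the deprecated address_v4::from_string is swapped for boost::asio::ip::make_address_v4. A tiny stand-alone illustration of the ordering rule (invented names, not from this commit):

// Hypothetical sketch of the member-initialization ordering rule.
struct Example {
    int first;
    int second;

    // 'first' is always constructed before 'second' because that is their declaration
    // order; keeping the initializer list in the same order avoids -Wreorder warnings
    // and makes a dependency like the one below safe to read.
    explicit Example(int value) : first(value), second(first + 1) {}
};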
@ -126,7 +125,7 @@ static void SocketLoop(Socket* socket) {
|
|||
|
||||
Client::Client(std::shared_ptr<DeviceStatus> status, const std::string& host, u16 port,
|
||||
u8 pad_index, u32 client_id)
|
||||
: status(status) {
|
||||
: status(std::move(status)) {
|
||||
StartCommunication(host, port, pad_index, client_id);
|
||||
}
|
||||
|
||||
|
@ -208,7 +207,7 @@ void TestCommunication(const std::string& host, u16 port, u8 pad_index, u32 clie
|
|||
Common::Event success_event;
|
||||
SocketCallback callback{[](Response::Version version) {}, [](Response::PortInfo info) {},
|
||||
[&](Response::PadData data) { success_event.Set(); }};
|
||||
Socket socket{host, port, pad_index, client_id, callback};
|
||||
Socket socket{host, port, pad_index, client_id, std::move(callback)};
|
||||
std::thread worker_thread{SocketLoop, &socket};
|
||||
bool result = success_event.WaitFor(std::chrono::seconds(8));
|
||||
socket.Stop();
|
||||
|
@ -264,7 +263,7 @@ CalibrationConfigurationJob::CalibrationConfigurationJob(
|
|||
complete_event.Set();
|
||||
}
|
||||
}};
|
||||
Socket socket{host, port, pad_index, client_id, callback};
|
||||
Socket socket{host, port, pad_index, client_id, std::move(callback)};
|
||||
std::thread worker_thread{SocketLoop, &socket};
|
||||
complete_event.Wait();
|
||||
socket.Stop();
|
||||
|
|
|
@ -11,7 +11,6 @@
|
|||
#include <string>
|
||||
#include <thread>
|
||||
#include <tuple>
|
||||
#include <vector>
|
||||
#include "common/common_types.h"
|
||||
#include "common/thread.h"
|
||||
#include "common/vector_math.h"
|
||||
|
|
|
@ -7,7 +7,6 @@
|
|||
#include <array>
|
||||
#include <optional>
|
||||
#include <type_traits>
|
||||
#include <vector>
|
||||
#include <boost/crc.hpp>
|
||||
#include "common/bit_field.h"
|
||||
#include "common/swap.h"
|
||||
|
|
|
@ -2,7 +2,8 @@
|
|||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include "common/logging/log.h"
|
||||
#include <mutex>
|
||||
#include <tuple>
|
||||
#include "common/param_package.h"
|
||||
#include "core/frontend/input.h"
|
||||
#include "core/settings.h"
|
||||
|
@ -14,7 +15,7 @@ namespace InputCommon::CemuhookUDP {
|
|||
class UDPTouchDevice final : public Input::TouchDevice {
|
||||
public:
|
||||
explicit UDPTouchDevice(std::shared_ptr<DeviceStatus> status_) : status(std::move(status_)) {}
|
||||
std::tuple<float, float, bool> GetStatus() const {
|
||||
std::tuple<float, float, bool> GetStatus() const override {
|
||||
std::lock_guard guard(status->update_mutex);
|
||||
return status->touch_status;
|
||||
}
|
||||
|
@ -26,7 +27,7 @@ private:
|
|||
class UDPMotionDevice final : public Input::MotionDevice {
|
||||
public:
|
||||
explicit UDPMotionDevice(std::shared_ptr<DeviceStatus> status_) : status(std::move(status_)) {}
|
||||
std::tuple<Common::Vec3<float>, Common::Vec3<float>> GetStatus() const {
|
||||
std::tuple<Common::Vec3<float>, Common::Vec3<float>> GetStatus() const override {
|
||||
std::lock_guard guard(status->update_mutex);
|
||||
return status->motion_status;
|
||||
}
|
||||
|
|
|
@ -2,16 +2,13 @@
|
|||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <memory>
|
||||
#include <unordered_map>
|
||||
#include "input_common/main.h"
|
||||
#include "input_common/udp/client.h"
|
||||
|
||||
namespace InputCommon::CemuhookUDP {
|
||||
|
||||
class UDPTouchDevice;
|
||||
class UDPMotionDevice;
|
||||
|
||||
class State {
|
||||
public:
|
||||
State();
|
||||
|
|
|
@@ -15,9 +15,9 @@ static std::shared_ptr<Memory::PageTable> page_table = nullptr;
TestEnvironment::TestEnvironment(bool mutable_memory_)
: mutable_memory(mutable_memory_), test_memory(std::make_shared<TestMemory>(this)) {

timing = std::make_unique<Core::Timing>();
timing = std::make_unique<Core::Timing>(1);
memory = std::make_unique<Memory::MemorySystem>();
kernel = std::make_unique<Kernel::KernelSystem>(*memory, *timing, [] {}, 0);
kernel = std::make_unique<Kernel::KernelSystem>(*memory, *timing, [] {}, 0, 1, 0);

kernel->SetCurrentProcess(kernel->CreateProcess(kernel->CreateCodeSet("", 0)));
page_table = kernel->GetCurrentProcess()->vm_manager.page_table;

@@ -23,7 +23,7 @@ TEST_CASE("ARM_DynCom (vfp): vadd", "[arm_dyncom]") {
test_env.SetMemory32(0, 0xEE321A03); // vadd.f32 s2, s4, s6
test_env.SetMemory32(4, 0xEAFFFFFE); // b +#0

ARM_DynCom dyncom(nullptr, test_env.GetMemory(), USER32MODE);
ARM_DynCom dyncom(nullptr, test_env.GetMemory(), USER32MODE, 0, nullptr);

std::vector<VfpTestCase> test_cases{{
#include "vfp_vadd_f32.inc"

@@ -34,16 +34,16 @@ static void AdvanceAndCheck(Core::Timing& timing, u32 idx, int downcount, int ex
expected_callback = CB_IDS[idx];
lateness = expected_lateness;

timing.AddTicks(timing.GetDowncount() -
timing.GetTimer(0)->AddTicks(timing.GetTimer(0)->GetDowncount() -
cpu_downcount); // Pretend we executed X cycles of instructions.
timing.Advance();
timing.GetTimer(0)->Advance();

REQUIRE(decltype(callbacks_ran_flags)().set(idx) == callbacks_ran_flags);
REQUIRE(downcount == timing.GetDowncount());
REQUIRE(downcount == timing.GetTimer(0)->GetDowncount());
}

TEST_CASE("CoreTiming[BasicOrder]", "[core]") {
Core::Timing timing;
Core::Timing timing(1);

Core::TimingEventType* cb_a = timing.RegisterEvent("callbackA", CallbackTemplate<0>);
Core::TimingEventType* cb_b = timing.RegisterEvent("callbackB", CallbackTemplate<1>);

@@ -52,60 +52,19 @@ TEST_CASE("CoreTiming[BasicOrder]", "[core]") {
Core::TimingEventType* cb_e = timing.RegisterEvent("callbackE", CallbackTemplate<4>);

// Enter slice 0
timing.Advance();
timing.GetTimer(0)->Advance();

// D -> B -> C -> A -> E
timing.ScheduleEvent(1000, cb_a, CB_IDS[0]);
REQUIRE(1000 == timing.GetDowncount());
timing.ScheduleEvent(500, cb_b, CB_IDS[1]);
REQUIRE(500 == timing.GetDowncount());
timing.ScheduleEvent(800, cb_c, CB_IDS[2]);
REQUIRE(500 == timing.GetDowncount());
timing.ScheduleEvent(100, cb_d, CB_IDS[3]);
REQUIRE(100 == timing.GetDowncount());
timing.ScheduleEvent(1200, cb_e, CB_IDS[4]);
REQUIRE(100 == timing.GetDowncount());

AdvanceAndCheck(timing, 3, 400);
AdvanceAndCheck(timing, 1, 300);
AdvanceAndCheck(timing, 2, 200);
AdvanceAndCheck(timing, 0, 200);
AdvanceAndCheck(timing, 4, MAX_SLICE_LENGTH);
}

TEST_CASE("CoreTiming[Threadsave]", "[core]") {
Core::Timing timing;

Core::TimingEventType* cb_a = timing.RegisterEvent("callbackA", CallbackTemplate<0>);
Core::TimingEventType* cb_b = timing.RegisterEvent("callbackB", CallbackTemplate<1>);
Core::TimingEventType* cb_c = timing.RegisterEvent("callbackC", CallbackTemplate<2>);
Core::TimingEventType* cb_d = timing.RegisterEvent("callbackD", CallbackTemplate<3>);
Core::TimingEventType* cb_e = timing.RegisterEvent("callbackE", CallbackTemplate<4>);

// Enter slice 0
timing.Advance();

// D -> B -> C -> A -> E
timing.ScheduleEventThreadsafe(1000, cb_a, CB_IDS[0]);
// Manually force since ScheduleEventThreadsafe doesn't call it
timing.ForceExceptionCheck(1000);
REQUIRE(1000 == timing.GetDowncount());
timing.ScheduleEventThreadsafe(500, cb_b, CB_IDS[1]);
// Manually force since ScheduleEventThreadsafe doesn't call it
timing.ForceExceptionCheck(500);
REQUIRE(500 == timing.GetDowncount());
timing.ScheduleEventThreadsafe(800, cb_c, CB_IDS[2]);
// Manually force since ScheduleEventThreadsafe doesn't call it
timing.ForceExceptionCheck(800);
REQUIRE(500 == timing.GetDowncount());
timing.ScheduleEventThreadsafe(100, cb_d, CB_IDS[3]);
// Manually force since ScheduleEventThreadsafe doesn't call it
timing.ForceExceptionCheck(100);
REQUIRE(100 == timing.GetDowncount());
timing.ScheduleEventThreadsafe(1200, cb_e, CB_IDS[4]);
// Manually force since ScheduleEventThreadsafe doesn't call it
timing.ForceExceptionCheck(1200);
REQUIRE(100 == timing.GetDowncount());
timing.ScheduleEvent(1000, cb_a, CB_IDS[0], 0);
REQUIRE(1000 == timing.GetTimer(0)->GetDowncount());
timing.ScheduleEvent(500, cb_b, CB_IDS[1], 0);
REQUIRE(500 == timing.GetTimer(0)->GetDowncount());
timing.ScheduleEvent(800, cb_c, CB_IDS[2], 0);
REQUIRE(500 == timing.GetTimer(0)->GetDowncount());
timing.ScheduleEvent(100, cb_d, CB_IDS[3], 0);
REQUIRE(100 == timing.GetTimer(0)->GetDowncount());
timing.ScheduleEvent(1200, cb_e, CB_IDS[4], 0);
REQUIRE(100 == timing.GetTimer(0)->GetDowncount());

AdvanceAndCheck(timing, 3, 400);
AdvanceAndCheck(timing, 1, 300);

@@ -131,7 +90,7 @@ void FifoCallback(u64 userdata, s64 cycles_late) {
TEST_CASE("CoreTiming[SharedSlot]", "[core]") {
using namespace SharedSlotTest;

Core::Timing timing;
Core::Timing timing(1);

Core::TimingEventType* cb_a = timing.RegisterEvent("callbackA", FifoCallback<0>);
Core::TimingEventType* cb_b = timing.RegisterEvent("callbackB", FifoCallback<1>);

@@ -139,36 +98,36 @@ TEST_CASE("CoreTiming[SharedSlot]", "[core]") {
Core::TimingEventType* cb_d = timing.RegisterEvent("callbackD", FifoCallback<3>);
Core::TimingEventType* cb_e = timing.RegisterEvent("callbackE", FifoCallback<4>);

timing.ScheduleEvent(1000, cb_a, CB_IDS[0]);
timing.ScheduleEvent(1000, cb_b, CB_IDS[1]);
timing.ScheduleEvent(1000, cb_c, CB_IDS[2]);
timing.ScheduleEvent(1000, cb_d, CB_IDS[3]);
timing.ScheduleEvent(1000, cb_e, CB_IDS[4]);
timing.ScheduleEvent(1000, cb_a, CB_IDS[0], 0);
timing.ScheduleEvent(1000, cb_b, CB_IDS[1], 0);
timing.ScheduleEvent(1000, cb_c, CB_IDS[2], 0);
timing.ScheduleEvent(1000, cb_d, CB_IDS[3], 0);
timing.ScheduleEvent(1000, cb_e, CB_IDS[4], 0);

// Enter slice 0
timing.Advance();
REQUIRE(1000 == timing.GetDowncount());
timing.GetTimer(0)->Advance();
REQUIRE(1000 == timing.GetTimer(0)->GetDowncount());

callbacks_ran_flags = 0;
counter = 0;
lateness = 0;
timing.AddTicks(timing.GetDowncount());
timing.Advance();
REQUIRE(MAX_SLICE_LENGTH == timing.GetDowncount());
timing.GetTimer(0)->AddTicks(timing.GetTimer(0)->GetDowncount());
timing.GetTimer(0)->Advance();
REQUIRE(MAX_SLICE_LENGTH == timing.GetTimer(0)->GetDowncount());
REQUIRE(0x1FULL == callbacks_ran_flags.to_ullong());
}

TEST_CASE("CoreTiming[PredictableLateness]", "[core]") {
Core::Timing timing;
Core::Timing timing(1);

Core::TimingEventType* cb_a = timing.RegisterEvent("callbackA", CallbackTemplate<0>);
Core::TimingEventType* cb_b = timing.RegisterEvent("callbackB", CallbackTemplate<1>);

// Enter slice 0
timing.Advance();
timing.GetTimer(0)->Advance();

timing.ScheduleEvent(100, cb_a, CB_IDS[0]);
timing.ScheduleEvent(200, cb_b, CB_IDS[1]);
timing.ScheduleEvent(100, cb_a, CB_IDS[0], 0);
timing.ScheduleEvent(200, cb_b, CB_IDS[1], 0);

AdvanceAndCheck(timing, 0, 90, 10, -10); // (100 - 10)
AdvanceAndCheck(timing, 1, MAX_SLICE_LENGTH, 50, -50);

@@ -190,7 +149,7 @@ static void RescheduleCallback(Core::Timing& timing, u64 userdata, s64 cycles_la
TEST_CASE("CoreTiming[ChainScheduling]", "[core]") {
using namespace ChainSchedulingTest;

Core::Timing timing;
Core::Timing timing(1);

Core::TimingEventType* cb_a = timing.RegisterEvent("callbackA", CallbackTemplate<0>);
Core::TimingEventType* cb_b = timing.RegisterEvent("callbackB", CallbackTemplate<1>);

@@ -201,28 +160,30 @@ TEST_CASE("CoreTiming[ChainScheduling]", "[core]") {
});

// Enter slice 0
timing.Advance();
timing.GetTimer(0)->Advance();

timing.ScheduleEvent(800, cb_a, CB_IDS[0]);
timing.ScheduleEvent(1000, cb_b, CB_IDS[1]);
timing.ScheduleEvent(2200, cb_c, CB_IDS[2]);
timing.ScheduleEvent(1000, cb_rs, reinterpret_cast<u64>(cb_rs));
REQUIRE(800 == timing.GetDowncount());
timing.ScheduleEvent(800, cb_a, CB_IDS[0], 0);
timing.ScheduleEvent(1000, cb_b, CB_IDS[1], 0);
timing.ScheduleEvent(2200, cb_c, CB_IDS[2], 0);
timing.ScheduleEvent(1000, cb_rs, reinterpret_cast<u64>(cb_rs), 0);
REQUIRE(800 == timing.GetTimer(0)->GetDowncount());

reschedules = 3;
AdvanceAndCheck(timing, 0, 200); // cb_a
AdvanceAndCheck(timing, 1, 1000); // cb_b, cb_rs
REQUIRE(2 == reschedules);

timing.AddTicks(timing.GetDowncount());
timing.Advance(); // cb_rs
timing.GetTimer(0)->AddTicks(timing.GetTimer(0)->GetDowncount());
timing.GetTimer(0)->Advance(); // cb_rs
REQUIRE(1 == reschedules);
REQUIRE(200 == timing.GetDowncount());
REQUIRE(200 == timing.GetTimer(0)->GetDowncount());

AdvanceAndCheck(timing, 2, 800); // cb_c

timing.AddTicks(timing.GetDowncount());
timing.Advance(); // cb_rs
timing.GetTimer(0)->AddTicks(timing.GetTimer(0)->GetDowncount());
timing.GetTimer(0)->Advance(); // cb_rs
REQUIRE(0 == reschedules);
REQUIRE(MAX_SLICE_LENGTH == timing.GetDowncount());
REQUIRE(MAX_SLICE_LENGTH == timing.GetTimer(0)->GetDowncount());
}

// TODO: Add tests for multiple timers

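For orientation, here is a minimal standalone sketch of the per-core timer pattern the updated tests above rely on: `Core::Timing` is constructed with a core count, events are scheduled against an explicit core index, and each core's timer is driven through `GetTimer(index)`. Only calls that appear in the diff are used; the include paths, callback body, and literal values are illustrative assumptions, not taken from the repository.

```cpp
// Sketch only: mirrors the calls exercised by the updated core timing tests.
#include "common/common_types.h"
#include "core/core_timing.h"

static void ExampleCallback(u64 userdata, s64 cycles_late) {
    // A timing event receives its userdata and how late (in cycles) it fired.
}

void TimerSketch() {
    Core::Timing timing(1); // one core -> one timer, addressed as index 0

    Core::TimingEventType* event = timing.RegisterEvent("example", ExampleCallback);

    timing.GetTimer(0)->Advance(); // enter slice 0, as the tests do

    // Scheduling now takes an explicit core index as the last argument.
    timing.ScheduleEvent(1000, event, /*userdata=*/0, /*core_id=*/0);

    // Pretend the CPU executed the pending cycles, then advance to fire the event.
    timing.GetTimer(0)->AddTicks(timing.GetTimer(0)->GetDowncount());
    timing.GetTimer(0)->Advance(); // runs ExampleCallback
}
```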
@@ -24,9 +24,9 @@ static std::shared_ptr<Object> MakeObject(Kernel::KernelSystem& kernel) {
}

TEST_CASE("HLERequestContext::PopulateFromIncomingCommandBuffer", "[core][kernel]") {
Core::Timing timing;
Core::Timing timing(1);
Memory::MemorySystem memory;
Kernel::KernelSystem kernel(memory, timing, [] {}, 0);
Kernel::KernelSystem kernel(memory, timing, [] {}, 0, 1, 0);
auto [server, client] = kernel.CreateSessionPair();
HLERequestContext context(kernel, std::move(server), nullptr);

@@ -239,9 +239,9 @@ TEST_CASE("HLERequestContext::PopulateFromIncomingCommandBuffer", "[core][kernel
}

TEST_CASE("HLERequestContext::WriteToOutgoingCommandBuffer", "[core][kernel]") {
Core::Timing timing;
Core::Timing timing(1);
Memory::MemorySystem memory;
Kernel::KernelSystem kernel(memory, timing, [] {}, 0);
Kernel::KernelSystem kernel(memory, timing, [] {}, 0, 1, 0);
auto [server, client] = kernel.CreateSessionPair();
HLERequestContext context(kernel, std::move(server), nullptr);

@@ -11,9 +11,9 @@
#include "core/memory.h"

TEST_CASE("Memory::IsValidVirtualAddress", "[core][memory]") {
Core::Timing timing;
Core::Timing timing(1);
Memory::MemorySystem memory;
Kernel::KernelSystem kernel(memory, timing, [] {}, 0);
Kernel::KernelSystem kernel(memory, timing, [] {}, 0, 1, 0);
SECTION("these regions should not be mapped on an empty process") {
auto process = kernel.CreateProcess(kernel.CreateCodeSet("", 0));
CHECK(Memory::IsValidVirtualAddress(*process, Memory::PROCESS_IMAGE_VADDR) == false);

@@ -1926,7 +1926,7 @@ void RasterizerCacheOpenGL::ValidateSurface(const Surface& surface, PAddr addr,
}

void RasterizerCacheOpenGL::ClearAll(bool flush) {
const SurfaceInterval flush_interval(0x0, 0xFFFFFFFF);
const auto flush_interval = PageMap::interval_type::right_open(0x0, 0xFFFFFFFF);
// Force flush all surfaces from the cache
if (flush) {
FlushRegion(0x0, 0xFFFFFFFF);

@@ -1945,8 +1945,8 @@ void RasterizerCacheOpenGL::ClearAll(bool flush) {

// Remove the whole cache without really looking at it.
cached_pages -= flush_interval;
dirty_regions -= flush_interval;
surface_cache -= flush_interval;
dirty_regions -= SurfaceInterval(0x0, 0xFFFFFFFF);
surface_cache -= SurfaceInterval(0x0, 0xFFFFFFFF);
remove_surfaces.clear();
}

@@ -80,11 +80,15 @@ struct CachedSurface;
using Surface = std::shared_ptr<CachedSurface>;
using SurfaceSet = std::set<Surface>;

using SurfaceRegions = boost::icl::interval_set<PAddr>;
using SurfaceMap = boost::icl::interval_map<PAddr, Surface>;
using SurfaceCache = boost::icl::interval_map<PAddr, SurfaceSet>;
using SurfaceInterval = boost::icl::right_open_interval<PAddr>;
using SurfaceRegions = boost::icl::interval_set<PAddr, std::less, SurfaceInterval>;
using SurfaceMap =
boost::icl::interval_map<PAddr, Surface, boost::icl::partial_absorber, std::less,
boost::icl::inplace_plus, boost::icl::inter_section, SurfaceInterval>;
using SurfaceCache =
boost::icl::interval_map<PAddr, SurfaceSet, boost::icl::partial_absorber, std::less,
boost::icl::inplace_plus, boost::icl::inter_section, SurfaceInterval>;

using SurfaceInterval = SurfaceCache::interval_type;
static_assert(std::is_same<SurfaceRegions::interval_type, SurfaceCache::interval_type>() &&
std::is_same<SurfaceMap::interval_type, SurfaceCache::interval_type>(),
"incorrect interval types");

@@ -101,6 +105,29 @@ enum class ScaleMatch {
};

struct SurfaceParams {
private:
static constexpr std::array<unsigned int, 18> BPP_TABLE = {
32, // RGBA8
24, // RGB8
16, // RGB5A1
16, // RGB565
16, // RGBA4
16, // IA8
16, // RG8
8, // I8
8, // A8
8, // IA4
4, // I4
4, // A4
4, // ETC1
8, // ETC1A4
16, // D16
0,
24, // D24
32, // D24S8
};

public:
enum class PixelFormat {
// First 5 formats are shared between textures and color buffers
RGBA8 = 0,

@@ -139,30 +166,11 @@ struct SurfaceParams {
};

static constexpr unsigned int GetFormatBpp(PixelFormat format) {
constexpr std::array<unsigned int, 18> bpp_table = {
32, // RGBA8
24, // RGB8
16, // RGB5A1
16, // RGB565
16, // RGBA4
16, // IA8
16, // RG8
8, // I8
8, // A8
8, // IA4
4, // I4
4, // A4
4, // ETC1
8, // ETC1A4
16, // D16
0,
24, // D24
32, // D24S8
};

assert(static_cast<std::size_t>(format) < bpp_table.size());
return bpp_table[static_cast<std::size_t>(format)];
const auto format_idx = static_cast<std::size_t>(format);
DEBUG_ASSERT_MSG(format_idx < BPP_TABLE.size(), "Invalid pixel format {}", format_idx);
return BPP_TABLE[format_idx];
}

unsigned int GetFormatBpp() const {
return GetFormatBpp(pixel_format);
}

@@ -245,7 +253,7 @@ struct SurfaceParams {
}

SurfaceInterval GetInterval() const {
return SurfaceInterval::right_open(addr, end);
return SurfaceInterval(addr, end);
}

// Returns the outer rectangle containing "interval"

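For orientation, a minimal standalone sketch (based only on documented Boost.ICL behaviour, with `PAddr` assumed here to be a 32-bit integer) of why pinning every alias to `boost::icl::right_open_interval<PAddr>` is convenient: the interval can be constructed directly as `SurfaceInterval(addr, end)` and added to or subtracted from any of the containers, which is how the `ClearAll` hunk above wipes `dirty_regions` and `surface_cache`.

```cpp
// Sketch only: PAddr and the literal addresses are illustrative assumptions.
#include <boost/icl/interval_set.hpp>
#include <boost/icl/right_open_interval.hpp>
#include <cstdint>
#include <functional>
#include <iostream>

using PAddr = std::uint32_t;
using SurfaceInterval = boost::icl::right_open_interval<PAddr>;
using SurfaceRegions = boost::icl::interval_set<PAddr, std::less, SurfaceInterval>;

int main() {
    SurfaceRegions dirty_regions;

    // right_open_interval(lower, upper) models the half-open range [lower, upper).
    dirty_regions += SurfaceInterval(0x1000, 0x2000);
    dirty_regions += SurfaceInterval(0x3000, 0x4000);

    // Subtracting an interval erases exactly that address range from the set;
    // subtracting the full address space clears it, as ClearAll() does above.
    dirty_regions -= SurfaceInterval(0x0, 0xFFFFFFFF);

    std::cout << boost::icl::is_empty(dirty_regions) << '\n'; // prints 1
}
```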
@@ -18,3 +18,6 @@ get_directory_property(OPENSSL_LIBS
DEFINITION OPENSSL_LIBS)
target_compile_definitions(web_service PRIVATE -DCPPHTTPLIB_OPENSSL_SUPPORT)
target_link_libraries(web_service PRIVATE common network json-headers ${OPENSSL_LIBS} httplib lurlparser cpp-jwt)
if (ANDROID)
target_link_libraries(web_service PRIVATE ifaddrs)
endif()

@@ -8,6 +8,9 @@
#include <string>
#include <LUrlParser.h>
#include <fmt/format.h>
#if defined(__ANDROID__)
#include <ifaddrs.h>
#endif
#include <httplib.h>
#include "common/common_types.h"
#include "common/logging/log.h"

@@ -73,14 +76,14 @@ struct Client::Impl {
if (!parsedUrl.GetPort(&port)) {
port = HTTP_PORT;
}
cli = std::make_unique<httplib::Client>(parsedUrl.m_Host.c_str(), port,
TIMEOUT_SECONDS);
cli = std::make_unique<httplib::Client>(parsedUrl.m_Host.c_str(), port);
cli->set_timeout_sec(TIMEOUT_SECONDS);
} else if (parsedUrl.m_Scheme == "https") {
if (!parsedUrl.GetPort(&port)) {
port = HTTPS_PORT;
}
cli = std::make_unique<httplib::SSLClient>(parsedUrl.m_Host.c_str(), port,
TIMEOUT_SECONDS);
cli = std::make_unique<httplib::SSLClient>(parsedUrl.m_Host.c_str(), port);
cli->set_timeout_sec(TIMEOUT_SECONDS);
} else {
LOG_ERROR(WebService, "Bad URL scheme {}", parsedUrl.m_Scheme);
return Common::WebResult{Common::WebResult::Code::InvalidURL, "Bad URL scheme"};