Compare commits

..

3 commits

Author SHA1 Message Date
45990c890d fix ci 2025-11-05 21:17:04 +01:00
2629151a04 update dockerfile 2025-11-05 20:27:30 +01:00
2c7cd2e83c fix memory leak 2025-07-31 14:40:21 +02:00
177 changed files with 7081 additions and 13908 deletions

View file

@ -1,15 +0,0 @@
#!/usr/bin/env bash
export EDITION=$1
source /etc/environment
: "${RNEX_CONTAINER_PLATFORM:=podman}"
for TARGET in node-holder proxy-secure proxy-insecure backend-auth backend-secure; do
$RNEX_CONTAINER_PLATFORM build \
--network=host \
--build-arg EDITION="$EDITION" \
--build-arg DATABASE_URL="$DATABASE_URL" \
-t "$CI_REGISTRY_IMAGE/$EDITION/$TARGET:$CI_COMMIT_SHORT_SHA" \
--target="$TARGET" .
$RNEX_CONTAINER_PLATFORM push "$CI_REGISTRY_IMAGE/$EDITION/$TARGET:$CI_COMMIT_SHORT_SHA"
done

View file

@ -1,3 +0,0 @@
{
"image": "ci.virintox.com/spfn/rust-nex/dev-container:latest"
}

View file

@ -1,12 +1,2 @@
.env
target
.dockerignore
Dockerfile
CODE_OF_CONDUCT.md
CONTRIBUTING.md
README.md
.gitignore
LICENSE
.devcontainer.json
.ci-scripts/make-edition.sh
.forgejo/workflows/build.yml
target

View file

@ -1,314 +0,0 @@
name: Build and Test
on:
push:
branches: ["**"]
pull_request:
env:
DOCKER_TLS_CERTDIR: /certs
IMAGE_TAG: ${{ github.sha }}
SHORT_SHA: ${{ github.sha }}
jobs:
mario-tennis:
runs-on: debian-trixie
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
submodules: recursive
- name: Cache container storage
uses: actions/cache@v4
with:
path: |
/var/lib/containers/storage
/run/containers/storage
~/.local/share/containers/storage
key: image-cache
- name: Login to registry
run: docker login -u ${{ secrets.PACKAGE_USER }} -p ${{ secrets.PACKAGE_PWD }} git.spbr.net
- name: Set short SHA
run: echo "SHORT_SHA=${GITHUB_SHA::6}" >> $GITHUB_ENV
- name: Build MTUS tetris edition
env:
CI_REGISTRY_IMAGE: git.spbr.net/spacebar/rust-nex
CI_COMMIT_SHORT_SHA: ${{ env.SHORT_SHA }}
run: ./.ci-scripts/make-edition.sh mario-tennis
wii-sports-club:
runs-on: debian-trixie
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
submodules: recursive
- name: Cache container storage
uses: actions/cache@v4
with:
path: |
/var/lib/containers/storage
/run/containers/storage
~/.local/share/containers/storage
key: image-cache
- name: Login to registry
run: docker login -u ${{ secrets.PACKAGE_USER }} -p ${{ secrets.PACKAGE_PWD }} git.spbr.net
- name: Set short SHA
run: echo "SHORT_SHA=${GITHUB_SHA::6}" >> $GITHUB_ENV
- name: Build Wii Sports Club edition
env:
CI_REGISTRY_IMAGE: git.spbr.net/spacebar/rust-nex
CI_COMMIT_SHORT_SHA: ${{ env.SHORT_SHA }}
run: ./.ci-scripts/make-edition.sh wii-sports-club
puyopuyo:
runs-on: debian-trixie
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
submodules: recursive
- name: Cache container storage
uses: actions/cache@v4
with:
path: |
/var/lib/containers/storage
/run/containers/storage
~/.local/share/containers/storage
key: image-cache
- name: Login to registry
run: docker login -u ${{ secrets.PACKAGE_USER }} -p ${{ secrets.PACKAGE_PWD }} git.spbr.net
- name: Set short SHA
run: echo "SHORT_SHA=${GITHUB_SHA::6}" >> $GITHUB_ENV
- name: Build puyo puyo tetris edition
env:
CI_REGISTRY_IMAGE: git.spbr.net/spacebar/rust-nex
CI_COMMIT_SHORT_SHA: ${{ env.SHORT_SHA }}
run: ./.ci-scripts/make-edition.sh puyopuyo
minecraft-wiiu:
runs-on: debian-trixie
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
submodules: recursive
- name: Cache container storage
uses: actions/cache@v4
with:
path: |
/var/lib/containers/storage
/run/containers/storage
~/.local/share/containers/storage
key: image-cache
- name: Login to registry
run: docker login -u ${{ secrets.PACKAGE_USER }} -p ${{ secrets.PACKAGE_PWD }} git.spbr.net
- name: Set short SHA
run: echo "SHORT_SHA=${GITHUB_SHA::6}" >> $GITHUB_ENV
- name: Build Minecraft Wii U edition
env:
CI_REGISTRY_IMAGE: git.spbr.net/spacebar/rust-nex
CI_COMMIT_SHORT_SHA: ${{ env.SHORT_SHA }}
run: ./.ci-scripts/make-edition.sh minecraft-wiiu
splatoon-testfire:
runs-on: debian-trixie
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
submodules: recursive
- name: Cache container storage
uses: actions/cache@v4
with:
path: |
/var/lib/containers/storage
/run/containers/storage
~/.local/share/containers/storage
key: image-cache
- name: Login to registry
run: docker login -u ${{ secrets.PACKAGE_USER }} -p ${{ secrets.PACKAGE_PWD }} git.spbr.net
- name: Set short SHA
run: echo "SHORT_SHA=${GITHUB_SHA::6}" >> $GITHUB_ENV
- name: Build Splatoon Testfire edition
env:
CI_REGISTRY_IMAGE: git.spbr.net/spacebar/rust-nex
CI_COMMIT_SHORT_SHA: ${{ env.SHORT_SHA }}
run: ./.ci-scripts/make-edition.sh splatoon-testfire
fast-racing-neo:
runs-on: debian-trixie
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
submodules: recursive
- name: Cache container storage
uses: actions/cache@v4
with:
path: |
/var/lib/containers/storage
/run/containers/storage
~/.local/share/containers/storage
key: image-cache
- name: Login to registry
run: docker login -u ${{ secrets.PACKAGE_USER }} -p ${{ secrets.PACKAGE_PWD }} git.spbr.net
- name: Set short SHA
run: echo "SHORT_SHA=${GITHUB_SHA::6}" >> $GITHUB_ENV
- name: Build Fast Racing NEO edition
env:
CI_REGISTRY_IMAGE: git.spbr.net/spacebar/rust-nex
CI_COMMIT_SHORT_SHA: ${{ env.SHORT_SHA }}
run: ./.ci-scripts/make-edition.sh fast-racing-neo
wii-u-chat:
runs-on: debian-trixie
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
submodules: recursive
- name: Cache container storage
uses: actions/cache@v4
with:
path: |
/var/lib/containers/storage
/run/containers/storage
~/.local/share/containers/storage
key: image-cache
- name: Login to registry
run: docker login -u ${{ secrets.PACKAGE_USER }} -p ${{ secrets.PACKAGE_PWD }} git.spbr.net
- name: Set short SHA
run: echo "SHORT_SHA=${GITHUB_SHA::6}" >> $GITHUB_ENV
- name: Build Wii U Chat edition
env:
CI_REGISTRY_IMAGE: git.spbr.net/spacebar/rust-nex
CI_COMMIT_SHORT_SHA: ${{ env.SHORT_SHA }}
run: ./.ci-scripts/make-edition.sh wii-u-chat
splatoon:
runs-on: debian-trixie
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
submodules: recursive
- name: Cache container storage
uses: actions/cache@v4
with:
path: |
/var/lib/containers/storage
/run/containers/storage
~/.local/share/containers/storage
key: image-cache
- name: Login to registry
run: docker login -u ${{ secrets.PACKAGE_USER }} -p ${{ secrets.PACKAGE_PWD }} git.spbr.net
- name: Set short SHA
run: echo "SHORT_SHA=${GITHUB_SHA::6}" >> $GITHUB_ENV
- name: Build Splatoon edition
env:
CI_REGISTRY_IMAGE: git.spbr.net/spacebar/rust-nex
CI_COMMIT_SHORT_SHA: ${{ env.SHORT_SHA }}
run: ./.ci-scripts/make-edition.sh splatoon
friends:
runs-on: debian-trixie
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
submodules: recursive
- name: Cache container storage
uses: actions/cache@v4
with:
path: |
/var/lib/containers/storage
/run/containers/storage
~/.local/share/containers/storage
key: image-cache
- name: Set short SHA
run: echo "SHORT_SHA=${GITHUB_SHA::6}" >> $GITHUB_ENV
- name: Login to registry
run: podman login -u ${{ secrets.PACKAGE_USER }} -p ${{ secrets.PACKAGE_PWD }} git.spbr.net
- name: Build Friends edition
env:
CI_REGISTRY_IMAGE: git.spbr.net/spacebar/rust-nex
CI_COMMIT_SHORT_SHA: ${{ env.SHORT_SHA }}
DATABASE_URL: ${{ secrets.DATABASE_FRIENDS }}
run: ./.ci-scripts/make-edition.sh friends
super-mario-maker:
runs-on: debian-trixie
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
submodules: recursive
- name: Cache container storage
uses: actions/cache@v4
with:
path: |
/var/lib/containers/storage
/run/containers/storage
~/.local/share/containers/storage
key: image-cache
- name: Set short SHA
run: echo "SHORT_SHA=${GITHUB_SHA::6}" >> $GITHUB_ENV
- name: Login to registry
run: podman login -u ${{ secrets.PACKAGE_USER }} -p ${{ secrets.PACKAGE_PWD }} git.spbr.net
- name: Build Super Mario Maker edition
env:
CI_REGISTRY_IMAGE: git.spbr.net/spacebar/rust-nex
CI_COMMIT_SHORT_SHA: ${{ env.SHORT_SHA }}
DATABASE_URL: ${{ secrets.DATABASE_SMM }}
run: ./.ci-scripts/make-edition.sh super-mario-maker

3
.gitignore vendored
View file

@ -2,5 +2,4 @@ target
.idea
.env
log
reports
.zed
reports

View file

@ -1,30 +1,28 @@
default:
image: quay.io/podman/stable
cache:
key: image-cache
paths:
- /var/lib/containers/storage
- /run/containers/storage
- .local/share/containers/storage
before_script:
- git submodule update --init
- podman login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
variables:
DOCKER_TLS_CERTDIR: "/certs"
IMAGE_TAG: "${CI_COMMIT_REF_SLUG}"
stages:
- build_and_test
- test
- build-and-push-image
- push-retagged-branch
- push-retagged-latest
splatoon:
stage: build_and_test
script: ./.ci-scripts/make-edition.sh splatoon
friends:
stage: build_and_test
script: ./.ci-scripts/make-edition.sh friends
super-mario-maker:
stage: build_and_test
script: ./.ci-scripts/make-edition.sh super-mario-maker
build-and-push-image:
stage: build-and-push-image
script:
- git submodule update --init
- podman login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
- podman build -t "$CI_REGISTRY_IMAGE/node-holder:$CI_COMMIT_SHORT_SHA" --target=node-holder .
- podman build -t "$CI_REGISTRY_IMAGE/proxy-secure-v1:$CI_COMMIT_SHORT_SHA" --target=proxy-secure-v1 .
- podman build -t "$CI_REGISTRY_IMAGE/proxy-insecure-v1:$CI_COMMIT_SHORT_SHA" --target=proxy-insecure-v1 .
- podman build -t "$CI_REGISTRY_IMAGE/backend-auth:$CI_COMMIT_SHORT_SHA" --target=backend-auth .
- podman build -t "$CI_REGISTRY_IMAGE/backend-secure:$CI_COMMIT_SHORT_SHA" --target=backend-secure .
- podman push "$CI_REGISTRY_IMAGE/node-holder:$CI_COMMIT_SHORT_SHA"
- podman push "$CI_REGISTRY_IMAGE/proxy-secure-v1:$CI_COMMIT_SHORT_SHA"
- podman push "$CI_REGISTRY_IMAGE/proxy-insecure-v1:$CI_COMMIT_SHORT_SHA"
- podman push "$CI_REGISTRY_IMAGE/backend-auth:$CI_COMMIT_SHORT_SHA"
- podman push "$CI_REGISTRY_IMAGE/backend-secure:$CI_COMMIT_SHORT_SHA"

3
.gitmodules vendored Normal file
View file

@ -0,0 +1,3 @@
[submodule "grpc-protobufs"]
path = grpc-protobufs
url = https://github.com/PretendoNetwork/grpc-protobufs.git

View file

@ -1,5 +0,0 @@
{
"rust-analyzer.cargo.features": [
"friends"
]
}

3549
Cargo.lock generated

File diff suppressed because it is too large Load diff

View file

@ -1,8 +1,91 @@
[workspace]
resolver = "3"
members = [
"macros",
"rnex-core",
"prudpv1",
"prudpv0"
, "proxy", "proxy-common", "prudplite"]
[package]
name = "rust-nex"
version = "0.1.0"
edition = "2021"
[profile.prod]
inherits = "release"
overflow-checks = false
strip = true
debug = false
debug-assertions = false
lto = true
incremental = false
[dependencies]
bytemuck = { version = "1.21.0", features = ["derive"] }
dotenv = "0.15.0"
once_cell = "1.20.2"
rc4 = "0.1.0"
thiserror = "2.0.11"
v_byte_macros = { git = "https://github.com/DJMrTV/VByteMacros" }
simplelog = "0.12.2"
chrono = "0.4.39"
log = "0.4.25"
anyhow = "1.0.95"
rand = "0.8.5"
hmac = "0.12.1"
md-5 = "^0.10.6"
tokio = { version = "1.43.0", features = ["macros", "rt-multi-thread", "net", "sync", "fs"] }
tokio-stream = { version = "0.1.17", features = ["io-util"] }
tonic = "0.12.3"
prost = "0.13.4"
hex = "0.4.3"
macros = { path = "macros" }
rocket = { version = "0.5.1", features = ["json", "serde_json"] }
serde = { version = "1.0.217", features = ["derive"] }
async-trait = "0.1.86"
paste = "1.0.15"
typenum = "1.18.0"
futures = "0.3.31"
reqwest = "0.12.18"
json = "0.12.4"
ctrlc = "3.4.7"
rsa = "0.9.8"
sha2 = "0.10.9"
chacha20 = "0.9.1"
rustls = "0.23.27"
rustls-pki-types = "1.12.0"
rustls-webpki = "0.103.3"
tokio-rustls = "0.26.2"
tokio-tungstenite = "0.27.0"
tungstenite = "0.27.0"
[build-dependencies]
tonic-build = "0.12.3"
[features]
default = ["secure", "auth"]
secure = []
auth = []
no_tls = []
[[bin]]
name = "proxy_insecure"
path = "src/executables/proxy_insecure.rs"
[[bin]]
name = "proxy_secure"
path = "src/executables/proxy_secure.rs"
[[bin]]
name = "backend_server_insecure"
path = "src/executables/backend_server_insecure.rs"
[[bin]]
name = "backend_server_secure"
path = "src/executables/backend_server_secure.rs"
[[bin]]
name = "edge_node_holder_server"
path = "src/executables/edge_node_holder_server.rs"

View file

@ -1,57 +1,34 @@
# syntax=docker/dockerfile:1
FROM rust:alpine AS chef
RUN apk add --no-cache musl-dev lld g++ make
RUN cargo install cargo-chef
FROM rust:alpine AS builder
WORKDIR /app
FROM chef AS planner
COPY . .
RUN cargo chef prepare --recipe-path recipe.json
FROM chef AS builder
RUN apk add --no-cache protobuf-dev git openssl-dev openssl-libs-static bash yq
RUN apk add --no-cache protobuf-dev git musl-dev lld openssl-dev openssl-libs-static
COPY --from=planner /app/recipe.json recipe.json
ARG EDITION
ARG DATABASE_URL
RUN --mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=/app/target \
cargo chef cook --release --recipe-path recipe.json --target x86_64-unknown-linux-musl && \
cargo chef cook --tests --target x86_64-unknown-linux-musl --recipe-path recipe.json
COPY . .
RUN --mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=/app/target \
./test-edition.sh && ./build-edition.sh && \
mkdir -p /app/dist && \
cp /app/target/x86_64-unknown-linux-musl/release/edge_node_holder_server /app/dist/ && \
cp /app/target/x86_64-unknown-linux-musl/release/proxy_insecure /app/dist/ && \
cp /app/target/x86_64-unknown-linux-musl/release/proxy_secure /app/dist/ && \
cp /app/target/x86_64-unknown-linux-musl/release/backend_server_insecure /app/dist/ && \
cp /app/target/x86_64-unknown-linux-musl/release/backend_server_secure /app/dist/
RUN git submodule update --init --recursive
RUN OPENSSL_LIB_DIR=/usr/lib OPENSSL_INCLUDE_DIR=/usr/include/openssl OPENSSL_STATIC=1 RUSTFLAGS="-C relocation-model=static -C linker=ld.lld" cargo test --target x86_64-unknown-linux-musl
RUN OPENSSL_LIB_DIR=/usr/lib OPENSSL_INCLUDE_DIR=/usr/include/openssl OPENSSL_STATIC=1 RUSTFLAGS="-C relocation-model=static -C linker=ld.lld" cargo build --profile prod --target x86_64-unknown-linux-musl #
FROM scratch AS node-holder
COPY --from=builder /app/dist/edge_node_holder_server /edge_node_holder_server
COPY --from=builder /app/target/x86_64-unknown-linux-musl/prod/edge_node_holder_server /edge_node_holder_server
ENTRYPOINT ["/edge_node_holder_server"]
FROM scratch AS proxy-insecure
COPY --from=builder /app/dist/proxy_insecure /proxy_insecure
FROM scratch AS proxy-insecure-v1
COPY --from=builder /app/target/x86_64-unknown-linux-musl/prod/proxy_insecure /proxy_insecure
ENTRYPOINT ["/proxy_insecure"]
FROM scratch AS proxy-secure
COPY --from=builder /app/dist/proxy_secure /proxy_secure
FROM scratch AS proxy-secure-v1
COPY --from=builder /app/target/x86_64-unknown-linux-musl/prod/proxy_secure /proxy_secure
ENTRYPOINT ["/proxy_secure"]
FROM scratch AS backend-auth
COPY --from=builder /app/dist/backend_server_insecure /backend_server_insecure
ENTRYPOINT ["/backend_server_insecure"]
FROM scratch AS backend-secure
COPY --from=builder /app/dist/backend_server_secure /backend_server_secure
FROM scratch AS backend-auth
COPY --from=builder /app/target/x86_64-unknown-linux-musl/prod/backend_server_insecure /backend_server_insecure
ENTRYPOINT ["/backend_server_secure"]
FROM chef AS dev-container
RUN apk add --no-cache openjdk21-jdk gcompat git bash protobuf-dev
COPY --from=builder /app/dist/* /usr/local/bin/
FROM scratch AS backend-secure
COPY --from=builder /app/target/x86_64-unknown-linux-musl/prod/backend_server_secure /backend_server_secure
ENTRYPOINT ["/backend_server_secure"]

View file

@ -1,16 +1,7 @@
# Rust NEX monorepo
This repo contains the code for all game servers using RNEX.
# Splatoon NEX Server in Rust
## Credits:
- Pretendo team for their reverse engineering efforts
- Pretendo team for the rest of the Servers and Reverse engineering efforts
- Kinnay for his huge work on reversing nex servers and documentation(https://github.com/Kinnay/NintendoClients/)
- Splatfestival testing team for helping us test our messes of code
- The SPFN team(RusticMaple, BloxerHD, Ceantix, RedBinder0526)
This NEX implementation was not created to rival Pretendo, we don't want any bad blood between anyone.
This project would never have been possible without their reverse engineering efforts.
As such, if you want to respect the Author's wishes, do not use it if you mean any harm to Pretendo. (Harm includes, e.g., using this software while also sabotaging Pretendo.) If you do show intent to harm them, you will be blocked from ever contributing and will be refused support.
We felt like this needed to be said as there are far too many Pretendo copycats who blatantly copy their code and use their reversal efforts with no credits in sight in an attempt to harm them for some grudge or stupid reason.
We feel that by working together and not against each other we can reach a better and healthier future for the community, for the health of developers, and for numerous other reasons.
- The SPFN team(Andrea and DJMrTV)

View file

@ -1,15 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
if [ -z ${EDITION+x} ]; then
EDITION=$1
fi
# comma separated list of features for the specified version
source ./buildscripts/common.sh
echo building $EDITION
echo FEATURES:
echo $EDITION_FEATURES
OPENSSL_LIB_DIR=/usr/lib OPENSSL_INCLUDE_DIR=/usr/include/openssl OPENSSL_STATIC=1 RUSTFLAGS="-C relocation-model=static -C linker=ld.lld" cargo build --release --features "$EDITION_FEATURES" --target x86_64-unknown-linux-musl

View file

@ -1,14 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
TMP_FEATURES_TRAILINGCOMMA=$(yq ea ".$EDITION.features" editions.yaml | sed 's/- //g' | tr '\n' ',')
echo "tmpfeatures: $TMP_FEATURES_TRAILINGCOMMA"
export EDITION_FEATURES=${TMP_FEATURES_TRAILINGCOMMA::-1}
SETTINGS=$(yq ea ".$EDITION.settings" editions.yaml | yq 'keys[]')
IFS=$'\n'
while IFS=$'\n' read -r KEY; do
VAL=$(yq ea ".$EDITION.settings.$KEY" editions.yaml)
declare "$KEY=$VAL"
export $KEY
done <<< "$SETTINGS"

View file

@ -1,14 +0,0 @@
#!/bin/sh
echo "running cargo check..."
./check-all.sh
STATUS=$?
if [ $STATUS -ne 0 ]; then
echo "cargo check failed, aborting"
exit 1
fi
echo "cargo check passed"
exit 0

View file

@ -1,13 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
EDITIONS=$(yq ea "." editions.yaml | yq 'keys[]')
IFS=$'\n'
while IFS=$'\n' read -r EDITION; do
if [[ $(yq ea ".$EDITION.include-in-checkall" editions.yaml) == "true" ]]
then
export EDITION
./check-edition.sh $EDITION
fi
done <<< "$EDITIONS"

View file

@ -1,16 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
if [ -z ${EDITION+x} ]; then
export EDITION=$1
fi
# comma separated list of features for the specified version
source ./buildscripts/common.sh
echo CHECKING $EDITION
echo FEATURES:
echo $EDITION_FEATURES
cargo check --features "$EDITION_FEATURES"

View file

@ -1,117 +0,0 @@
wii-sports-club:
include-in-checkall: true
features:
- prudpv1
- third-notif-param
- v3-8-15
settings:
AUTH_REPORT_VERSION: "branch:origin/project/appsp build:3_4_24_4_0"
RNEX_VIRTUAL_PORT_INSECURE: "1:10"
RNEX_VIRTUAL_PORT_SECURE: "1:10"
RNEX_DEFAULT_PORT: 10000
RNEX_ACCESS_KEY: "4d324052"
puyopuyo:
include-in-checkall: true
features:
- prudpv1
- third-notif-param
- v3-8-15
settings:
AUTH_REPORT_VERSION: "branch:origin/release/ngs/3.5.x.1000 build:3_5_16_1000_0"
RNEX_VIRTUAL_PORT_INSECURE: "1:10"
RNEX_VIRTUAL_PORT_SECURE: "1:10"
RNEX_DEFAULT_PORT: 10000
RNEX_ACCESS_KEY: "4eb0ca36"
minecraft-wiiu:
include-in-checkall: true
features:
- prudpv1
- third-notif-param
- v3-10-22
settings:
AUTH_REPORT_VERSION: "branch:origin/release/ngs/3.10.x.200x build:3_10_22_2006_0"
RNEX_VIRTUAL_PORT_INSECURE: "1:10"
RNEX_VIRTUAL_PORT_SECURE: "1:10"
RNEX_DEFAULT_PORT: 13000
RNEX_ACCESS_KEY: "f1b61c8e"
mario-tennis:
include-in-checkall: true
features:
- prudpv1
- third-notif-param
- v3-8-15
settings:
AUTH_REPORT_VERSION: "branch:origin/release/ngs/3.9.x.200x build:3_9_19_2005_0"
RNEX_VIRTUAL_PORT_INSECURE: "1:10"
RNEX_VIRTUAL_PORT_SECURE: "1:10"
RNEX_DEFAULT_PORT: 10000
RNEX_ACCESS_KEY: "c69b92a0"
wii-u-chat:
include-in-checkall: true
features:
- prudpv1
- third-notif-param
- v3-3-2
settings:
AUTH_REPORT_VERSION: "branch:origin/project/wup-agmj build:3_8_15_2004_0"
RNEX_VIRTUAL_PORT_INSECURE: "1:10"
RNEX_VIRTUAL_PORT_SECURE: "1:10"
RNEX_DEFAULT_PORT: 10000
RNEX_ACCESS_KEY: "e7a47214"
fast-racing-neo:
include-in-checkall: true
features:
- prudpv1
- v3-8-15
settings:
AUTH_REPORT_VERSION: "branch:origin/release/ngs/3.9.x.200x build:3_9_19_2005_0"
RNEX_VIRTUAL_PORT_INSECURE: "1:10"
RNEX_VIRTUAL_PORT_SECURE: "1:10"
RNEX_DEFAULT_PORT: 10000
RNEX_ACCESS_KEY: "811aa39f"
splatoon:
include-in-checkall: true
features:
- prudpv1
- v3-8-15
- splatoon
settings:
AUTH_REPORT_VERSION: "branch:origin/project/wup-agmj build:3_8_15_2004_0"
RNEX_VIRTUAL_PORT_INSECURE: "1:10"
RNEX_VIRTUAL_PORT_SECURE: "1:10"
RNEX_DEFAULT_PORT: 6000
RNEX_ACCESS_KEY: "6f599f81"
splatoon-testfire:
include-in-checkall: true
features:
- prudpv1
- v3-8-15
- splatoon
settings:
AUTH_REPORT_VERSION: "branch:origin/project/wup-agmj build:3_8_15_2004_0"
RNEX_VIRTUAL_PORT_INSECURE: "1:10"
RNEX_VIRTUAL_PORT_SECURE: "1:10"
RNEX_DEFAULT_PORT: 10000
RNEX_ACCESS_KEY: "da693ee5"
friends:
include-in-checkall: false
features:
- friends
settings:
AUTH_REPORT_VERSION: "branch:origin/feature/45925_FixAutoReconnect build:3_10_11_2006_0"
RNEX_VIRTUAL_PORT_INSECURE: "1:10"
RNEX_VIRTUAL_PORT_SECURE: "1:10"
RNEX_DEFAULT_PORT: 6000
RNEX_ACCESS_KEY: "ridfebb9"
super-mario-maker:
include-in-checkall: false
features:
- prudpv1
- v3-8-15
- datastore
settings:
AUTH_REPORT_VERSION: "branch:origin/project/wup-ama build:3_8_29_3022_0"
RNEX_VIRTUAL_PORT_INSECURE: "1:10"
RNEX_VIRTUAL_PORT_SECURE: "1:10"
RNEX_DEFAULT_PORT: 6000
RNEX_ACCESS_KEY: "9f2b4678"

61
flake.lock generated
View file

@ -1,61 +0,0 @@
{
"nodes": {
"flake-parts": {
"inputs": {
"nixpkgs-lib": "nixpkgs-lib"
},
"locked": {
"lastModified": 1767609335,
"narHash": "sha256-feveD98mQpptwrAEggBQKJTYbvwwglSbOv53uCfH9PY=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "250481aafeb741edfe23d29195671c19b36b6dca",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "flake-parts",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1767640445,
"narHash": "sha256-UWYqmD7JFBEDBHWYcqE6s6c77pWdcU/i+bwD6XxMb8A=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "9f0c42f8bc7151b8e7e5840fb3bd454ad850d8c5",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-lib": {
"locked": {
"lastModified": 1765674936,
"narHash": "sha256-k00uTP4JNfmejrCLJOwdObYC9jHRrr/5M/a/8L2EIdo=",
"owner": "nix-community",
"repo": "nixpkgs.lib",
"rev": "2075416fcb47225d9b68ac469a5c4801a9c4dd85",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "nixpkgs.lib",
"type": "github"
}
},
"root": {
"inputs": {
"flake-parts": "flake-parts",
"nixpkgs": "nixpkgs"
}
}
},
"root": "root",
"version": 7
}

View file

@ -1,28 +0,0 @@
{
description = "rust nex server";
inputs = {
nixpkgs.url = "github:nixos/nixpkgs?ref=nixos-unstable";
flake-parts.url = "github:hercules-ci/flake-parts";
};
outputs =
inputs@{
self,
nixpkgs,
flake-parts,
}:
flake-parts.lib.mkFlake { inherit inputs; } {
systems = [
"x86_64-linux"
"aarch64-linux"
"x86_64-darwin"
"aarch64-darwin"
];
perSystem =
{ pkgs, lib, ... }:
rec {
devShells.default = import ./shell.nix { inherit pkgs; };
};
};
}

1
grpc-protobufs Submodule

@ -0,0 +1 @@
Subproject commit 405fe9b47b416e76b21d7087b2ed11606deccfcf

113
macros/Cargo.lock generated
View file

@ -2,55 +2,15 @@
# It is not intended for manual editing.
version = 4
[[package]]
name = "bitflags"
version = "2.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967"
[[package]]
name = "cfg-if"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268"
[[package]]
name = "getrandom"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4"
dependencies = [
"cfg-if",
"libc",
"r-efi",
"wasi",
]
[[package]]
name = "libc"
version = "0.2.174"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776"
[[package]]
name = "macros"
version = "0.0.0"
dependencies = [
"proc-macro2",
"quote",
"rand",
"syn",
]
[[package]]
name = "ppv-lite86"
version = "0.2.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9"
dependencies = [
"zerocopy",
]
[[package]]
name = "proc-macro2"
version = "1.0.93"
@ -69,41 +29,6 @@ dependencies = [
"proc-macro2",
]
[[package]]
name = "r-efi"
version = "5.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f"
[[package]]
name = "rand"
version = "0.9.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1"
dependencies = [
"rand_chacha",
"rand_core",
]
[[package]]
name = "rand_chacha"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb"
dependencies = [
"ppv-lite86",
"rand_core",
]
[[package]]
name = "rand_core"
version = "0.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38"
dependencies = [
"getrandom",
]
[[package]]
name = "syn"
version = "2.0.98"
@ -120,41 +45,3 @@ name = "unicode-ident"
version = "1.0.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a210d160f08b701c8721ba1c726c11662f877ea6b7094007e1ca9a1041945034"
[[package]]
name = "wasi"
version = "0.14.2+wasi-0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3"
dependencies = [
"wit-bindgen-rt",
]
[[package]]
name = "wit-bindgen-rt"
version = "0.39.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1"
dependencies = [
"bitflags",
]
[[package]]
name = "zerocopy"
version = "0.8.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f"
dependencies = [
"zerocopy-derive",
]
[[package]]
name = "zerocopy-derive"
version = "0.8.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181"
dependencies = [
"proc-macro2",
"quote",
"syn",
]

View file

@ -1,16 +1,17 @@
[package]
name = "macros"
version = "0.0.0"
authors = ["RusticMaple <tvnebel@gmail.com>"]
authors = ["DJMrTV <tvnebel@gmail.com>"]
description = "A `cargo generate` template for quick-starting a procedural macro crate"
keywords = ["template", "proc_macro", "procmacro"]
edition = "2018"
[lib]
proc-macro = true
doctest = false
[dependencies]
quote = "1.0.38"
proc-macro2 = "1.0.93"
syn = { version = "2.0.98", features = ["full"] }
rand = "0.9.0"

View file

@ -1,75 +1,378 @@
#![allow(dead_code)]
mod protos;
mod rmc_struct;
mod util;
extern crate proc_macro;
use crate::protos::{ProtoInputParams, RmcProtocolData};
use crate::rmc_struct::{rmc_serialize_enum, rmc_serialize_struct};
use crate::protos::{ProtoMethodData, RmcProtocolData};
use proc_macro::TokenStream;
use proc_macro2::Ident;
use quote::quote;
use proc_macro2::{Ident, Literal, Span};
use quote::{quote, TokenStreamExt};
use syn::parse::{Parse, ParseStream};
use syn::punctuated::Punctuated;
use syn::spanned::Spanned;
use syn::{parse_macro_input, Data, DeriveInput, Lit, LitStr};
use syn::{
parse_macro_input, Attribute, Data, DataStruct, DeriveInput, Fields, FnArg, LitInt, Pat, Token,
TraitItem,
};
struct ProtoInputParams {
proto_num: LitInt,
properties: Option<(Token![,], Punctuated<Ident, Token![,]>)>,
}
impl Parse for ProtoInputParams {
fn parse(input: ParseStream) -> syn::Result<Self> {
let proto_num = input.parse()?;
if let Some(seperator) = input.parse()? {
let mut punctuated = Punctuated::new();
loop {
punctuated.push_value(input.parse()?);
if let Some(punct) = input.parse()? {
punctuated.push_punct(punct);
} else {
return Ok(Self {
proto_num,
properties: Some((seperator, punctuated)),
});
}
}
} else {
Ok(Self {
proto_num,
properties: None,
})
}
}
}
fn gen_serialize_data_struct(
s: DataStruct,
struct_attr: Option<&Attribute>,
) -> (proc_macro2::TokenStream, proc_macro2::TokenStream) {
let serialize_base_content = {
let mut serialize_content = quote! {};
for f in &s.fields {
if f.attrs.iter().any(|a| {
a.path().segments.len() == 1
&& a.path()
.segments
.first()
.is_some_and(|p| p.ident.to_string() == "extends")
}) {
continue;
}
let ident = f.ident.as_ref().unwrap();
serialize_content.append_all(quote! {
self.#ident.serialize(writer)?;
})
}
quote! {
#serialize_content
Ok(())
}
};
let struct_ctor = {
let mut structure_content = quote! {};
for f in &s.fields {
let ident = f.ident.as_ref().unwrap();
structure_content.append_all(quote! {#ident, });
}
quote! {
Ok(Self{
#structure_content
})
}
};
let deserialize_base_content = {
let mut deserialize_content = quote! {};
for f in &s.fields {
if f.attrs.iter().any(|a| {
a.path().segments.len() == 1
&& a.path()
.segments
.first()
.is_some_and(|p| p.ident.to_string() == "extends")
}) {
continue;
}
let ident = f.ident.as_ref().unwrap();
let ty = &f.ty;
deserialize_content.append_all(quote! {
let #ident = <#ty> :: deserialize(reader)?;
})
}
quote! {
#deserialize_content
#struct_ctor
}
};
// generate base with extends stuff
let serialize_base_content = if let Some(attr) = struct_attr {
let version: Literal = attr.parse_args().expect("has to be a literal");
let pre_inner = if let Some(f) = s.fields.iter().find(|f| {
f.attrs.iter().any(|a| {
a.path().segments.len() == 1
&& a.path()
.segments
.first()
.is_some_and(|p| p.ident.to_string() == "extends")
})
}) {
let ident = f.ident.as_ref().unwrap();
quote! {
self.#ident.serialize(writer)?;
}
} else {
quote! {}
};
quote! {
#pre_inner
rust_nex::rmc::structures::rmc_struct::write_struct(writer, #version, |mut writer|{
#serialize_base_content
})?;
Ok(())
}
} else {
serialize_base_content
};
let deserialize_base_content = if let Some(attr) = struct_attr {
let version: Literal = attr.parse_args().expect("has to be a literal");
let pre_inner = if let Some(f) = s.fields.iter().find(|f| {
f.attrs.iter().any(|a| {
a.path().segments.len() == 1
&& a.path()
.segments
.first()
.is_some_and(|p| p.ident.to_string() == "extends")
})
}) {
let ident = f.ident.as_ref().unwrap();
let ty = &f.ty;
quote! {
let #ident = <#ty> :: deserialize(reader)?;
}
} else {
quote! {}
};
quote! {
#pre_inner
Ok(rust_nex::rmc::structures::rmc_struct::read_struct(reader, #version, move |mut reader|{
#deserialize_base_content
})?)
}
} else {
deserialize_base_content
};
(serialize_base_content, deserialize_base_content)
}
#[proc_macro_derive(RmcSerialize, attributes(extends, rmc_struct))]
pub fn rmc_serialize(input: TokenStream) -> TokenStream {
let derive_input = parse_macro_input!(input as DeriveInput);
let (serialize, deserialize, write_size, version) = match &derive_input.data {
Data::Struct(s) => rmc_serialize_struct(s, &derive_input),
Data::Enum(e) => rmc_serialize_enum(e, &derive_input),
let struct_attr = derive_input.attrs.iter().find(|a| {
a.path().segments.len() == 1
&& a.path()
.segments
.first()
.is_some_and(|p| p.ident.to_string() == "rmc_struct")
});
let repr_attr = derive_input.attrs.iter().find(|a| {
a.path().segments.len() == 1
&& a.path()
.segments
.first()
.is_some_and(|p| p.ident.to_string() == "repr")
});
/*let Data::Struct(s) = derive_input.data else {
panic!("rmc struct type MUST be a struct");
};*/
let (serialize_base_content, deserialize_base_content) = match derive_input.data {
Data::Struct(s) => gen_serialize_data_struct(s, struct_attr),
Data::Enum(e) => {
let Some(repr_attr) = repr_attr else {
panic!("missing repr attribute");
};
let ty: Ident = repr_attr.parse_args().unwrap();
let mut inner_match_de = quote! {};
let mut inner_match_se = quote! {};
for variant in e.variants {
let Some((_, val)) = variant.discriminant else {
panic!("missing discriminant");
};
let field_data_de = match &variant.fields {
Fields::Named(v) => {
let mut base = quote! {};
for field in v.named.iter() {
let ty = &field.ty;
let name = &field.ident;
base.append_all(quote!{
#name: <#ty as rust_nex::rmc::structures::RmcSerialize>::deserialize(reader)?,
});
}
quote! {{#base}}
}
Fields::Unnamed(n) => {
let mut base = quote! {};
for field in n.unnamed.iter() {
let ty = &field.ty;
base.append_all(quote!{
<#ty as rust_nex::rmc::structures::RmcSerialize>::deserialize(reader)?,
});
}
quote! {(#base)}
}
Fields::Unit => {
quote! {}
}
};
let mut se_with_fields = quote! {
<#ty as rust_nex::rmc::structures::RmcSerialize>::serialize(&#val, writer)?;
};
match &variant.fields {
Fields::Named(v) => {
for field in v.named.iter() {
let ty = &field.ty;
let name = &field.ident;
se_with_fields.append_all(quote!{
<#ty as rust_nex::rmc::structures::RmcSerialize>::serialize(#name ,writer)?;
});
}
}
Fields::Unnamed(n) => {
for (i, field) in n.unnamed.iter().enumerate() {
let ty = &field.ty;
let ident = Ident::new(&format!("val_{}", i), Span::call_site());
se_with_fields.append_all(quote!{
<#ty as rust_nex::rmc::structures::RmcSerialize>::serialize(#ident, writer)?;
});
}
}
Fields::Unit => {}
};
let field_match_se = match &variant.fields {
Fields::Named(v) => {
let mut base = quote! {};
for field in v.named.iter() {
let name = &field.ident;
base.append_all(quote! {
#name,
});
}
quote! {{#base}}
}
Fields::Unnamed(n) => {
let mut base = quote! {};
for (i, _field) in n.unnamed.iter().enumerate() {
let ident = Ident::new(&format!("val_{}", i), Span::call_site());
base.append_all(quote! {
#ident,
});
}
quote! {(#base)}
}
Fields::Unit => {
quote! {}
}
};
let name = variant.ident;
inner_match_de.append_all(quote! {
#val => Self::#name #field_data_de,
});
inner_match_se.append_all(quote! {
Self::#name #field_match_se => {
#se_with_fields
},
});
}
let serialize_base_content = quote! {
match self{
#inner_match_se
};
Ok(())
};
let deserialize_base_content = quote! {
let val: Self = match <#ty as rust_nex::rmc::structures::RmcSerialize>::deserialize(reader)?{
#inner_match_de
v => return Err(rust_nex::rmc::structures::Error::UnexpectedValue(v as _))
};
Ok(val)
};
(serialize_base_content, deserialize_base_content)
}
Data::Union(_) => {
unimplemented!("serialize a union is not allowed");
unimplemented!()
}
};
// generate base data
let str_name = Lit::Str(LitStr::new(
&derive_input.ident.to_string(),
derive_input.ident.span(),
));
let ident = derive_input.ident;
let write_size = if let Some(v) = write_size {
quote! {
fn serialize_write_size(&self) -> rnex_core::rmc::structures::Result<u32>{
#v
}
}
} else {
quote! {}
};
let version = if let Some(v) = version {
quote! {
fn version() -> Option<u8>{
#v
}
}
} else {
quote! {}
};
let tokens = quote! {
impl rnex_core::rmc::structures::RmcSerialize for #ident{
#[inline(always)]
fn serialize(&self, writer: &mut impl ::std::io::Write) -> rnex_core::rmc::structures::Result<()>{
#serialize
}
#[inline(always)]
fn deserialize(reader: &mut impl ::std::io::Read) -> rnex_core::rmc::structures::Result<Self>{
#deserialize
impl rust_nex::rmc::structures::RmcSerialize for #ident{
fn serialize(&self, writer: &mut dyn ::std::io::Write) -> rust_nex::rmc::structures::Result<()>{
#serialize_base_content
}
#write_size
#version
fn name() -> &'static str{
#str_name
fn deserialize(reader: &mut dyn ::std::io::Read) -> rust_nex::rmc::structures::Result<Self>{
#deserialize_base_content
}
}
};
@ -104,9 +407,71 @@ pub fn rmc_serialize(input: TokenStream) -> TokenStream {
#[proc_macro_attribute]
pub fn rmc_proto(attr: TokenStream, input: TokenStream) -> TokenStream {
let params = parse_macro_input!(attr as ProtoInputParams);
let ProtoInputParams {
proto_num,
properties,
} = params;
let no_return_data =
properties.is_some_and(|p| p.1.iter().any(|i| i.to_string() == "NoReturn"));
let input = parse_macro_input!(input as syn::ItemTrait);
let raw_data = RmcProtocolData::new(params, &input);
// gigantic ass struct initializer (to summarize this gets all of the data)
let raw_data = RmcProtocolData {
has_returns: !no_return_data,
name: input.ident.clone(),
id: proto_num,
methods: input
.items
.iter()
.filter_map(|v| match v {
TraitItem::Fn(v) => Some(v),
_ => None,
})
.map(|func| {
let Some(attr) = func.attrs.iter().find(|a| {
a.path()
.segments
.last()
.is_some_and(|s| s.ident.to_string() == "method_id")
}) else {
panic!("every function inside of an rmc protocol must have a method id");
};
let Ok(id): Result<LitInt, _> = attr.parse_args() else {
panic!("todo: put a propper error message here");
};
let funcs = func
.sig
.inputs
.iter()
.skip(1)
.map(|f| {
let FnArg::Typed(t) = f else {
panic!("what");
};
let Pat::Ident(i) = &*t.pat else {
panic!(
"unable to handle non identifier patterns as parameter bindings"
);
};
(i.ident.clone(), t.ty.as_ref().clone())
})
.collect();
ProtoMethodData {
id,
name: func.sig.ident.clone(),
parameters: funcs,
ret_val: func.sig.output.clone(),
}
})
.collect(),
};
quote! {
#input
@ -146,8 +511,8 @@ pub fn rmc_struct(attr: TokenStream, input: TokenStream) -> TokenStream {
}
impl rnex_core::rmc::protocols::RmcCallable for #struct_name{
async fn rmc_call(&self, remote_response_connection: &rnex_core::util::SendingBufferConnection, protocol_id: u16, method_id: u32, call_id: u32, rest: Vec<u8>){
impl rust_nex::rmc::protocols::RmcCallable for #struct_name{
async fn rmc_call(&self, remote_response_connection: &rust_nex::util::SendingBufferConnection, protocol_id: u16, method_id: u32, call_id: u32, rest: Vec<u8>){
<Self as #ident>::rmc_call(self, remote_response_connection, protocol_id, method_id, call_id, rest).await;
}
}

View file

@ -1,319 +1,199 @@
use proc_macro2::{Ident, Span, TokenStream};
use quote::{quote, ToTokens};
use syn::{
parse::{Parse, ParseStream},
punctuated::Punctuated,
Attribute, FnArg, ItemTrait, LitInt, LitStr, Meta, Pat, ReturnType, Token, TraitItem, Type,
};
use syn::{LitInt, LitStr, ReturnType, Type};
use syn::token::{Brace, Paren, Semi};
use crate::util::fold_tokenable;
/// Arguments accepted by the `rmc_proto` attribute macro: a protocol id
/// literal, optionally followed by a comma-separated list of property
/// flags (e.g. `NoReturn`).
pub struct ProtoInputParams {
    // numeric protocol id (first macro argument)
    proto_num: LitInt,
    // optional `, Flag, Flag, ...` tail; the first comma token is retained
    properties: Option<(Token![,], Punctuated<Ident, Token![,]>)>,
}
impl Parse for ProtoInputParams {
    fn parse(input: ParseStream) -> syn::Result<Self> {
        // first macro argument: the protocol id literal
        let proto_num = input.parse()?;
        // no comma after the id means no property flags follow
        let Some(separator) = input.parse()? else {
            return Ok(Self {
                proto_num,
                properties: None,
            });
        };
        // collect comma-separated flag identifiers; the list ends as soon as
        // an entry is not followed by another comma
        let mut flags = Punctuated::new();
        loop {
            flags.push_value(input.parse()?);
            match input.parse()? {
                Some(comma) => flags.push_punct(comma),
                None => break,
            }
        }
        Ok(Self {
            proto_num,
            properties: Some((separator, flags)),
        })
    }
}
pub struct ProtoMethodData {
pub struct ProtoMethodData{
pub id: LitInt,
pub name: Ident,
pub attributes: Vec<Attribute>,
pub parameters: Vec<(Ident, Type, Vec<Attribute>)>,
pub parameters: Vec<(Ident, Type)>,
pub ret_val: ReturnType,
}
/// A representation of the code generated by `rmc_proto`. It separates the
/// logic of acquiring the data from the actual generation, tidying the
/// process into two stages: first gather, then generate.
///
/// Use the [`ToTokens`] trait to generate the actual code.
pub struct RmcProtocolData {
pub struct RmcProtocolData{
pub has_returns: bool,
pub id: LitInt,
pub name: Ident,
pub methods: Vec<ProtoMethodData>,
pub methods: Vec<ProtoMethodData>
}
impl RmcProtocolData {
// Collects everything `rmc_proto` needs from the attribute arguments and the
// annotated trait into an `RmcProtocolData`; all code generation happens
// later through `ToTokens`.
//
// Panics (surfacing as compile errors to the macro user) when:
//   - a trait method is missing a `#[method_id(..)]` attribute,
//   - the `method_id` argument is not an integer literal,
//   - a parameter uses a pattern other than a plain identifier.
pub fn new(params: ProtoInputParams, input: &ItemTrait) -> Self {
    let ProtoInputParams {
        proto_num,
        properties,
    } = params;
    // `NoReturn` property flag: the protocol's methods produce no response payload
    let no_return_data =
        properties.is_some_and(|p| p.1.iter().any(|i| i.to_string() == "NoReturn"));
    // gigantic ass struct initializer (to summarize this gets all of the data)
    RmcProtocolData {
        has_returns: !no_return_data,
        name: input.ident.clone(),
        id: proto_num,
        methods: input
            .items
            .iter()
            // only trait *functions* become RMC methods; consts/types are ignored
            .filter_map(|v| match v {
                TraitItem::Fn(v) => Some(v),
                _ => None,
            })
            .map(|func| {
                // every method must carry `#[method_id(N)]`
                let Some(attr) = func.attrs.iter().find(|a| {
                    a.path()
                        .segments
                        .last()
                        .is_some_and(|s| s.ident.to_string() == "method_id")
                }) else {
                    panic!("every function inside of an rmc protocol must have a method id");
                };
                let Ok(id): Result<LitInt, _> = attr.parse_args() else {
                    panic!("todo: put a propper error message here");
                };
                // parameters: skip the receiver (`&self`), keep (name, type, attrs)
                let funcs = func
                    .sig
                    .inputs
                    .iter()
                    .skip(1)
                    .map(|f| {
                        let FnArg::Typed(t) = f else {
                            panic!("what");
                        };
                        let Pat::Ident(i) = &*t.pat else {
                            panic!(
                                "unable to handle non identifier patterns as parameter bindings"
                            );
                        };
                        (i.ident.clone(), t.ty.as_ref().clone(), t.attrs.clone())
                    })
                    .collect();
                ProtoMethodData {
                    id,
                    name: func.sig.ident.clone(),
                    parameters: funcs,
                    ret_val: func.sig.output.clone(),
                    // forward the user's attributes, minus doc comments and the
                    // consumed `method_id` marker
                    attributes: func
                        .attrs
                        .iter()
                        .filter(|a| match &a.meta {
                            Meta::NameValue(v) => {
                                if let Some(i) = v.path.get_ident() {
                                    i.to_string() != "doc"
                                } else {
                                    true
                                }
                            }
                            Meta::List(l) => {
                                if let Some(seg) = l.path.segments.last() {
                                    seg.ident.to_string() != "method_id"
                                } else {
                                    true
                                }
                            }
                            _ => true,
                        })
                        .cloned()
                        .collect(),
                }
            })
            .collect(),
    }
}
fn generate_raw_trait(&self) -> TokenStream {
let Self {
impl RmcProtocolData{
fn generate_raw_trait(&self, tokens: &mut TokenStream){
let Self{
has_returns,
name,
id,
methods,
methods
} = self;
let generate_raw_method = |method: &ProtoMethodData| -> TokenStream {
let ProtoMethodData {
name,
parameters,
attributes,
..
} = method;
let attribs = fold_tokenable(attributes.iter());
let raw_name = Ident::new(&format!("raw_{}", name), name.span());
let optional_return = if self.has_returns {
quote! {
-> ::core::result::Result<Vec<u8>, ::rnex_core::rmc::response::ErrorCode>
}
} else {
quote! {}
}
.into_token_stream();
let deser_params =
fold_tokenable(parameters.iter().map(|(param_name, param_type, attribs)| {
let error_msg = LitStr::new(
&format!("an error occurred whilest deserializing {}", param_name),
Span::call_site(),
);
let return_from_deser_error = if self.has_returns {
quote! {
return Err(::rnex_core::rmc::response::ErrorCode::Core_InvalidArgument);
}
} else {
quote! {
return;
}
};
let attribs = fold_tokenable(attribs.iter());
quote! {
#attribs
let Ok(#param_name) =
<#param_type as rnex_core::rmc::structures::RmcSerialize>::deserialize(
&mut cursor
) else{
log::error!(#error_msg);
#return_from_deser_error
};
}
}));
let call_params = fold_tokenable(parameters.iter().map(|(param_name, _, attribs)| {
let attribs = fold_tokenable(attribs.iter());
quote! {
#attribs
#param_name,
}
}));
let optional_method_return = if *has_returns {
quote! {
let retval = retval?;
let mut vec = Vec::new();
rnex_core::rmc::structures::RmcSerialize::serialize(&retval, &mut vec).ok();
Ok(vec)
}
} else {
quote! {}
};
quote! {
#[inline(always)]
#attribs
async fn #raw_name (&self, data: ::std::vec::Vec<u8>) #optional_return{
let mut cursor = ::std::io::Cursor::new(data);
#deser_params
let retval = self.#name(#call_params).await;
#optional_method_return
}
}
};
let generate_rmc_call_proto = || {
let method_entries = fold_tokenable(methods.iter().map(|m| {
let ProtoMethodData {
id,
name,
attributes,
..
} = m;
let attribs = fold_tokenable(attributes.iter());
let raw_name = Ident::new(&format!("raw_{}", name), name.span());
quote! {
#attribs
#id => self.#raw_name(data).await,
}
}));
let optional_notimpl_return = if self.has_returns {
quote! {
Err(rnex_core::rmc::response::ErrorCode::Core_NotImplemented)
}
} else {
quote! {}
};
let optional_result_sendback = if *has_returns {
quote! {
rnex_core::rmc::response::send_result(
remote_response_connection,
ret,
#id,
method_id,
call_id,
).await
}
} else {
quote! {}
};
quote! {
#[inline(always)]
async fn rmc_call_proto(
&self,
remote_response_connection: &rnex_core::util::SendingBufferConnection,
method_id: u32,
call_id: u32,
data: Vec<u8>,
){
let ret = match method_id{
#method_entries
v => {
log::error!("(protocol {})unimplemented method id called on protocol: {}", #id, v);
#optional_notimpl_return
}
};
#optional_result_sendback
}
}
};
// this gives us the name which the identifier of the corresponding Raw trait
let raw_name = Ident::new(&format!("Raw{}", name), name.span());
let proto_raw_methods = fold_tokenable(self.methods.iter().map(|m| generate_raw_method(m)));
let rmc_call_proto = generate_rmc_call_proto();
// boilerplate tokens which all raw traits need
quote! {
quote!{
#[doc(hidden)]
#[allow(unused_must_use)]
pub trait #raw_name: #name{
#proto_raw_methods
#rmc_call_proto
pub trait #raw_name: #name
}.to_tokens(tokens);
// generate the body of the raw protocol trait
Brace::default().surround(tokens, |tokens|{
//generate each raw method
for method in methods{
let ProtoMethodData {
name,
parameters,
..
} = method;
let raw_name = Ident::new(&format!("raw_{}", name), name.span());
quote!{
async fn #raw_name
}.to_tokens(tokens);
Paren::default().surround(tokens, |tokens|{
quote!{ &self, data: ::std::vec::Vec<u8> }.to_tokens(tokens);
});
if self.has_returns {
quote! {
-> ::core::result::Result<Vec<u8>, ErrorCode>
}.to_tokens(tokens);
}
Brace::default().surround(tokens, |tokens|{
quote! { let mut cursor = ::std::io::Cursor::new(data); }.to_tokens(tokens);
for (param_name, param_type) in parameters{
quote!{
let Ok(#param_name) =
<#param_type as rust_nex::rmc::structures::RmcSerialize>::deserialize(
&mut cursor
) else
}.to_tokens(tokens);
let error_msg = LitStr::new(&format!("an error occurred whilest deserializing {}", param_name), Span::call_site());
if self.has_returns {
quote! {
{
log::error!(#error_msg);
return Err(rust_nex::rmc::response::ErrorCode::Core_InvalidArgument);
};
}.to_tokens(tokens)
} else {
quote! {
{
log::error!(#error_msg);
return;
};
}.to_tokens(tokens)
}
}
quote!{
let retval = self.#name
}.to_tokens(tokens);
Paren::default().surround(tokens, |tokens|{
for (paren_name, _) in parameters{
quote!{#paren_name,}.to_tokens(tokens);
}
});
quote!{
.await;
}.to_tokens(tokens);
if *has_returns{
quote!{
let retval = retval?;
let mut vec = Vec::new();
rust_nex::rmc::structures::RmcSerialize::serialize(&retval, &mut vec).ok();
Ok(vec)
}.to_tokens(tokens);
}
})
}
quote!{
async fn rmc_call_proto(
&self,
remote_response_connection: &rust_nex::util::SendingBufferConnection,
method_id: u32,
call_id: u32,
data: Vec<u8>,
)
}.to_tokens(tokens);
Brace::default().surround(tokens, |tokens|{
quote! {
let ret = match method_id
}.to_tokens(tokens);
Brace::default().surround(tokens, |tokens|{
for method in methods{
let ProtoMethodData{
id,
name,
..
} = method;
let raw_name = Ident::new(&format!("raw_{}", name), name.span());
quote!{
#id => self.#raw_name(data).await,
}.to_tokens(tokens);
}
quote!{
v =>
}.to_tokens(tokens);
Brace::default().surround(tokens, |tokens|{
quote!{
log::error!("(protocol {})unimplemented method id called on protocol: {}", #id, v);
}.to_tokens(tokens);
if self.has_returns {
quote! {
Err(rust_nex::rmc::response::ErrorCode::Core_NotImplemented)
}.to_tokens(tokens);
}
});
});
Semi::default().to_tokens(tokens);
if *has_returns{
quote!{
rust_nex::rmc::response::send_result(
remote_response_connection,
ret,
#id,
method_id,
call_id,
).await
}.to_tokens(tokens);
}
});
});
quote!{
impl<T: #name> #raw_name for T{}
}
.to_token_stream()
}.to_tokens(tokens);
}
fn generate_raw_remote_trait(&self) -> TokenStream {
fn generate_raw_remote_trait(&self, tokens: &mut TokenStream) {
let Self {
has_returns,
name,
@ -324,111 +204,126 @@ impl RmcProtocolData {
// this gives us the name which the identifier of the corresponding Raw trait
let remote_name = Ident::new(&format!("Remote{}", name), name.span());
let generate_remote_method = |m: &ProtoMethodData| -> TokenStream {
let ProtoMethodData {
name,
parameters,
ret_val,
attributes,
id: method_id,
} = m;
let params = fold_tokenable(parameters.iter().map(|(ident, ty, attr)| {
let attrs = fold_tokenable(attr.iter());
quote! { #attrs #ident: #ty, }
}));
let optional_questionmark_operator = if self.has_returns {
quote! {
?
}
} else {
quote! {}
};
let param_serialize = fold_tokenable(parameters.iter().map(|(name, ty, attrs)|{
let attrs = fold_tokenable(attrs.iter());
quote!{
#attrs
rnex_core::result::ResultExtension::display_err_or_some(
<#ty as rnex_core::rmc::structures::RmcSerialize>::serialize(
&#name,
&mut cursor
)
).ok_or(rnex_core::rmc::response::ErrorCode::Core_InvalidArgument)#optional_questionmark_operator ;
}
}));
let make_call = if *has_returns {
quote! {
rnex_core::result::ResultExtension::display_err_or_some(
rmc_conn.make_raw_call(&message).await
).ok_or(rnex_core::rmc::response::ErrorCode::Core_Exception)
}
} else {
quote! {
rnex_core::result::ResultExtension::display_err_or_some(
rmc_conn.make_raw_call_no_response(&message).await
);
}
};
let attribs = fold_tokenable(attributes.iter());
quote! {
#attribs
async fn #name(&self, #params) #ret_val{
let mut send_data = ::std::vec::Vec::new();
let mut cursor = ::std::io::Cursor::new(&mut send_data);
#param_serialize
let call_id = rand::random();
let message = rnex_core::rmc::message::RMCMessage{
call_id,
method_id: #method_id,
protocol_id: #proto_id,
rest_of_data: send_data
};
let rmc_conn = <Self as rnex_core::rmc::protocols::HasRmcConnection>::get_connection(self);
#make_call
}
}
};
let remote_methods = fold_tokenable(methods.iter().map(|m| generate_remote_method(m)));
quote! {
// boilerplate tokens which all raw traits need
quote!{
#[doc(hidden)]
#[allow(unused_must_use)]
pub trait #remote_name: rnex_core::rmc::protocols::HasRmcConnection{
#remote_methods
pub trait #remote_name: rust_nex::rmc::protocols::HasRmcConnection
}.to_tokens(tokens);
// generate the body of the raw protocol trait
Brace::default().surround(tokens, |tokens|{
//generate each raw method
for method in methods{
let ProtoMethodData {
name,
parameters,
ret_val,
id: method_id,
..
} = method;
quote!{
async fn #name
}.to_tokens(tokens);
Paren::default().surround(tokens, |tokens|{
quote!{ &self, }.to_tokens(tokens);
for (param_ident, param_type) in parameters{
quote!{ #param_ident: #param_type, }.to_tokens(tokens);
}
});
quote!{
#ret_val
}.to_tokens(tokens);
Brace::default().surround(tokens, |tokens|{
quote! {
let mut send_data = Vec::new();
let mut cursor = ::std::io::Cursor::new(&mut send_data);
}.to_tokens(tokens);
for (param_name, param_type) in parameters{
quote!{
rust_nex::result::ResultExtension::display_err_or_some(
<#param_type as rust_nex::rmc::structures::RmcSerialize>::serialize(
&#param_name,
&mut cursor
)
).ok_or(rust_nex::rmc::response::ErrorCode::Core_InvalidArgument)
}.to_tokens(tokens);
if self.has_returns {
quote! {
?;
}.to_tokens(tokens)
} else {
quote! {
;
}.to_tokens(tokens)
}
}
quote!{
let call_id = rand::random();
let message = rust_nex::rmc::message::RMCMessage{
call_id,
method_id: #method_id,
protocol_id: #proto_id,
rest_of_data: send_data
};
let rmc_conn = <Self as rust_nex::rmc::protocols::HasRmcConnection>::get_connection(self);
}.to_tokens(tokens);
if *has_returns{
quote!{
rust_nex::result::ResultExtension::display_err_or_some(
rmc_conn.make_raw_call(&message).await
).ok_or(rust_nex::rmc::response::ErrorCode::Core_Exception)
}.to_tokens(tokens);
} else {
quote!{
rust_nex::result::ResultExtension::display_err_or_some(
rmc_conn.make_raw_call_no_response(&message).await
);
}.to_tokens(tokens);
}
})
}
}
});
}
fn generate_raw_info(&self) -> TokenStream {
let Self { name, id, .. } = self;
fn generate_raw_info(&self, tokens: &mut TokenStream){
let Self{
name,
id,
..
} = self;
let raw_info_name = Ident::new(&format!("Raw{}Info", name), Span::call_site());
quote! {
quote!{
#[doc(hidden)]
pub struct #raw_info_name;
impl #raw_info_name {
pub const PROTOCOL_ID: u16 = #id;
}
}
}.to_tokens(tokens);
}
}
impl ToTokens for RmcProtocolData {
impl ToTokens for RmcProtocolData{
fn to_tokens(&self, tokens: &mut TokenStream) {
self.generate_raw_trait().to_tokens(tokens);
self.generate_raw_info().to_tokens(tokens);
self.generate_raw_remote_trait().to_tokens(tokens);
self.generate_raw_trait(tokens);
self.generate_raw_info(tokens);
self.generate_raw_remote_trait(tokens);
}
}
}

View file

@ -1,426 +0,0 @@
use proc_macro2::{Literal, Span, TokenStream};
use quote::quote;
use syn::{
bracketed, parse::Parse, punctuated::Punctuated, token::Bracket, DataEnum, DataStruct,
DeriveInput, Field, Fields, Ident, Meta, Token, Variant,
};
use crate::util::fold_tokenable;
// One `[feature_name, version]` entry of the `rmc_struct` attribute:
// overrides the struct version when the named cargo feature is enabled.
struct RmcStructAttrVersion {
    // NOTE(review): `bracket`/`delim` tokens look unused beyond parsing —
    // presumably kept for their spans; confirm before removing.
    bracket: Bracket,
    delim: Token![,],
    feature_name: Literal,
    struct_version: Literal,
}
// Parsed form of `#[rmc_struct(BASE_VER, [feature, ver], ...)]`: a base
// version literal plus optional feature-gated overrides.
struct RmcStructAttr {
    base_ver: Literal,
    // `None` when the attribute carried only the base version
    versions: Option<(Token![,], Punctuated<RmcStructAttrVersion, Token![,]>)>,
}
impl Parse for RmcStructAttr {
    fn parse(input: syn::parse::ParseStream) -> syn::Result<Self> {
        // first argument: the base struct version literal
        let base_ver = input.parse()?;
        // no comma after it -> no per-feature overrides
        let Some(separator) = input.parse()? else {
            return Ok(Self {
                base_ver,
                versions: None,
            });
        };
        // comma-separated `[feature, version]` entries; the list ends as soon
        // as an entry is not followed by another comma
        let mut entries = Punctuated::new();
        loop {
            entries.push_value(input.parse()?);
            match input.parse()? {
                Some(comma) => entries.push_punct(comma),
                None => break,
            }
        }
        Ok(Self {
            base_ver,
            versions: Some((separator, entries)),
        })
    }
}
impl RmcStructAttr {
    /// Iterates over the feature-gated version overrides (empty when the
    /// attribute only carried a base version).
    fn versions(&self) -> impl Iterator<Item = &RmcStructAttrVersion> {
        self.versions
            .as_ref()
            .into_iter()
            .flat_map(|(_, entries)| entries.iter())
    }
}
impl Parse for RmcStructAttrVersion {
    fn parse(input: syn::parse::ParseStream) -> syn::Result<Self> {
        // each entry is bracketed: `[feature_name, struct_version]`
        let content;
        let bracket = bracketed!(content in input);
        let feature_name = content.parse()?;
        let delim = content.parse()?;
        let struct_version = content.parse()?;
        Ok(Self {
            bracket,
            delim,
            feature_name,
            struct_version,
        })
    }
}
/// Builds the body of `serialize_write_size` for a struct: sums each field's
/// size and, when the type carries a struct header, adds its fixed overhead.
pub fn generate_write_size_struct(
    s: &DataStruct,
    with_potential_header: bool,
) -> proc_macro2::TokenStream {
    // this is fine and works because of a quirk where the sizes of the structs dont change
    // if we ignore wether or not a struct extends the other struct or has it as a field
    let mut per_field = proc_macro2::TokenStream::new();
    for field in s.fields.iter() {
        let name = field.ident.as_ref().unwrap();
        // forward every field attribute except the macro-internal `extends`
        let kept_attrs = fold_tokenable(field.attrs.iter().filter(|a| {
            match a.meta.path().get_ident() {
                Some(i) => i.to_string() != "extends",
                None => true,
            }
        }));
        per_field.extend(quote! {
            #kept_attrs
            sum += rnex_core::rmc::structures::RmcSerialize::serialize_write_size(&self.#name)?;
        });
    }
    let header_overhead = if with_potential_header {
        quote! { sum += (if rnex_core::config::FEATURE_HAS_STRUCT_HEADER{ 5 } else { 0 }); }
    } else {
        quote! {}
    };
    quote! {
        let mut sum = 0;
        #per_field
        #header_overhead
        Ok(sum)
    }
}
/// Builds the `serialize` body for a struct.
///
/// - `extended_struct`: the optional `#[extends]` parent field, written first
///   and outside the struct header.
/// - `elems`: the remaining fields, written in declaration order.
/// - `with_header`: wrap the field writes in an RMC struct header via
///   `write_struct` (used when the type has an `rmc_struct` version).
pub fn generate_serialize_struct(
    extended_struct: Option<&Field>,
    elems: &[&Field],
    with_header: bool,
) -> proc_macro2::TokenStream {
    // serialization statement for one field, forwarding its attributes
    // except the macro-internal `extends` marker
    fn gen_elem_serialize(f: &Field) -> TokenStream {
        let ident = f.ident.as_ref().unwrap();
        let attrs = fold_tokenable(f.attrs.iter().filter(|a| {
            if let Some(i) = a.meta.path().get_ident() {
                i.to_string() != "extends"
            } else {
                true
            }
        }));
        quote! {
            #attrs
            rnex_core::rmc::structures::RmcSerialize::serialize(&self.#ident, writer)?;
        }
    }
    let optional_extended_struct = if let Some(f) = extended_struct {
        gen_elem_serialize(f)
    } else {
        quote! {}
    };
    let elems = fold_tokenable(elems.iter().map(|e| gen_elem_serialize(e)));
    let ser_body = if with_header {
        // NOTE(review): `#elems` is emitted twice on purpose — once inside
        // `len_of_write` (presumably to precompute the payload length for the
        // header) and once to actually write it; confirm against the helper.
        quote! {
            rnex_core::rmc::structures::rmc_struct::write_struct(
                writer,
                Self::version().unwrap(),
                rnex_core::rmc::structures::helpers::len_of_write(
                    |writer|{
                        #elems
                        Ok(())
                    }
                ),
                |writer|{
                    #elems
                    Ok(())
                }
            )?;
        }
    } else {
        elems
    };
    quote! {
        #optional_extended_struct
        #ser_body
        Ok(())
    }
}
/// Builds the `deserialize` body for a struct: reads the optional `#[extends]`
/// parent first (outside the header), then each remaining field, and finally
/// constructs `Self` from the bound locals.
pub fn generate_deserialize_struct(
    s: &DataStruct,
    extended_struct: Option<&Field>,
    elems: &[&Field],
    with_header: bool,
) -> proc_macro2::TokenStream {
    // `let <ident>: <ty> = ...deserialize(reader)?;` for one field, keeping
    // its attributes except the macro-internal `extends` marker
    fn gen_elem_serialize(f: &Field) -> TokenStream {
        let ident = f.ident.as_ref().unwrap();
        let ty = &f.ty;
        let attrs = fold_tokenable(f.attrs.iter().filter(|a| {
            if let Some(i) = a.meta.path().get_ident() {
                i.to_string() != "extends"
            } else {
                true
            }
        }));
        quote! {
            #attrs
            let #ident: #ty = rnex_core::rmc::structures::RmcSerialize::deserialize(reader)?;
        }
    }
    let optional_extended_struct = if let Some(f) = extended_struct {
        gen_elem_serialize(f)
    } else {
        quote! {}
    };
    let elems = fold_tokenable(elems.iter().map(|e| gen_elem_serialize(e)));
    // `Self { a, b, ... }` over ALL fields (including the parent) — every
    // field is in scope as a local by the time this runs
    let struct_ctor_content = fold_tokenable(s.fields.iter().map(|f| {
        let ident = f.ident.as_ref().unwrap();
        let attrs = fold_tokenable(f.attrs.iter().filter(|a| {
            if let Some(i) = a.meta.path().get_ident() {
                i.to_string() != "extends"
            } else {
                true
            }
        }));
        quote! { #attrs #ident, }
    }));
    let de_body_inner = quote! {
        #elems
        Ok(Self{
            #struct_ctor_content
        })
    };
    // with a header, field reads happen inside `read_struct`'s closure
    let de_body = if with_header {
        quote! {
            Ok(rnex_core::rmc::structures::rmc_struct::read_struct(reader, Self::version().unwrap(), move |mut reader|{
                #de_body_inner
            })?)
        }
    } else {
        de_body_inner
    };
    quote! {
        #optional_extended_struct
        #de_body
    }
}
/// Emits the body of `version()`: `None` without an `rmc_struct` attribute,
/// otherwise a `cfg!`-based if/else chain that picks the first enabled
/// feature override and falls back to the base version.
fn generate_struct_version(attr: Option<&RmcStructAttr>) -> proc_macro2::TokenStream {
    let Some(attr) = attr else {
        return quote! { None };
    };
    let base_ver = &attr.base_ver;
    // each override contributes `if cfg!(feature = "...") { VER } else`
    let mut overrides = proc_macro2::TokenStream::new();
    for entry in attr.versions() {
        let version_val = &entry.struct_version;
        let feature = &entry.feature_name;
        overrides.extend(quote! {
            if cfg!(feature = #feature){
                #version_val
            } else
        });
    }
    quote! {
        Some(#overrides {
            #base_ver
        })
    }
}
pub fn rmc_serialize_struct(
s: &DataStruct,
derive_input: &DeriveInput,
) -> (
proc_macro2::TokenStream,
proc_macro2::TokenStream,
Option<proc_macro2::TokenStream>,
Option<proc_macro2::TokenStream>,
) {
let struct_attr = derive_input.attrs.iter().find(|a| {
a.path().segments.len() == 1
&& a.path()
.segments
.first()
.is_some_and(|p| p.ident.to_string() == "rmc_struct")
&& matches!(a.meta, Meta::List(_))
});
let struct_attr: Option<RmcStructAttr> = struct_attr.map(|a| a.parse_args().unwrap());
let struct_attr = struct_attr.as_ref();
let extended_struct = s.fields.iter().find(|f| {
f.attrs.iter().any(|a| {
a.path().segments.len() == 1
&& a.path()
.segments
.first()
.is_some_and(|p| p.ident.to_string() == "extends")
})
});
let elements: Vec<_> = s
.fields
.iter()
.filter(|f| {
!f.attrs.iter().any(|a| {
a.path().segments.len() == 1
&& a.path()
.segments
.first()
.is_some_and(|p| p.ident.to_string() == "extends")
})
})
.collect();
let elements = &elements[..];
let serialize = generate_serialize_struct(extended_struct, elements, struct_attr.is_some());
let deserialize =
generate_deserialize_struct(s, extended_struct, elements, struct_attr.is_some());
let write_size = generate_write_size_struct(s, struct_attr.is_some());
let version = generate_struct_version(struct_attr);
(serialize, deserialize, Some(write_size), Some(version))
}
/// Identifier used to bind a field in generated match patterns: the field's
/// own name for named fields, a synthetic `field_<idx>` for tuple fields.
fn field_to_ident(field: &Field, idx: usize) -> Ident {
    field
        .ident
        .clone()
        .unwrap_or_else(|| Ident::new(&format!("field_{}", idx), Span::call_site()))
}
/// Splits an enum variant into (match-pattern tokens, payload fields):
/// `{a, b}` for named variants, `(field_0, field_1)` for tuple variants,
/// nothing for unit variants. The returned fields line up with the bound
/// names produced via `field_to_ident`.
fn variant_to_pattern_and_fields(variant: &Variant) -> (proc_macro2::TokenStream, Vec<Field>) {
    match &variant.fields {
        Fields::Named(n) => {
            // bind each named field by its own identifier
            let inner = n
                .named
                .iter()
                .map(|f| {
                    let attrs = fold_tokenable(f.attrs.iter());
                    let ident = f.ident.as_ref().unwrap();
                    quote! { #attrs #ident }
                })
                .reduce(|a, b| quote! {#a, #b});
            (quote! {{#inner}}, n.named.iter().cloned().collect())
        }
        Fields::Unnamed(n) => {
            // tuple fields get synthetic `field_<idx>` bindings
            let inner = n
                .unnamed
                .iter()
                .enumerate()
                .map(|(i, f)| {
                    let attrs = fold_tokenable(f.attrs.iter());
                    let name = field_to_ident(f, i);
                    quote! { #attrs #name }
                })
                .reduce(|a, b| quote! {#a, #b});
            (quote! {(#inner)}, n.unnamed.iter().cloned().collect())
        }
        Fields::Unit => (quote! {}, vec![]),
    }
}
/// Generates the `serialize` body for an enum: writes the variant's
/// discriminant as `repr_ty`, then each payload field in declaration order.
pub fn rmc_generate_serialize_enum(
    enum_data: &DataEnum,
    repr_ty: &Ident,
) -> proc_macro2::TokenStream {
    let match_content = fold_tokenable(enum_data.variants.iter().map(|v| {
        let ident = &v.ident;
        let descriminant = &v.discriminant.as_ref().expect("every variant must have a descriminant to be a valid rmc struct").1;
        let (pattern, fields) = variant_to_pattern_and_fields(v);
        // serialize every bound payload field after the discriminant
        let inner = fold_tokenable(fields.iter().enumerate().map(|(i, f)| {
            let ty = &f.ty;
            // `f` is already `&Field`; the previous `&f` was a needless double borrow
            let name = field_to_ident(f, i);
            quote! {<#ty as rnex_core::rmc::structures::RmcSerialize>::serialize(#name, writer)?;}
        }));
        quote! {
            Self::#ident #pattern => {
                <#repr_ty as rnex_core::rmc::structures::RmcSerialize>::serialize(&#descriminant, writer)?;
                #inner
            }
        }
    }));
    quote! {
        match self{
            #match_content
        }
        Ok(())
    }
}
/// Generates the `deserialize` body for an enum: reads a `repr_ty`
/// discriminant, then the matching variant's payload fields; an unknown
/// discriminant becomes `Error::UnexpectedValue`.
pub fn rmc_generate_deserialize_enum(
    enum_data: &DataEnum,
    repr_ty: &Ident,
) -> proc_macro2::TokenStream {
    let match_content = fold_tokenable(enum_data.variants.iter().map(|v| {
        let ident = &v.ident;
        let descriminant = &v
            .discriminant
            .as_ref()
            .expect("every variant must have a descriminant to be a valid rmc struct")
            .1;
        let (pattern, fields) = variant_to_pattern_and_fields(v);
        // deserialize each payload field into the binding name the pattern uses
        let inner = fold_tokenable(fields.iter().enumerate().map(|(i, f)| {
            let ty = &f.ty;
            // `f` is already `&Field`; the previous `&f` was a needless double borrow
            let name = field_to_ident(f, i);
            quote! {let #name = <#ty as rnex_core::rmc::structures::RmcSerialize>::deserialize(reader)?;}
        }));
        quote! {
            #descriminant => {
                #inner
                Self::#ident #pattern
            }
        }
    }));
    quote! {
        let discriminant = <#repr_ty as rnex_core::rmc::structures::RmcSerialize>::deserialize(reader)?;
        Ok(match discriminant{
            #match_content
            v => {
                return Err(rnex_core::rmc::structures::Error::UnexpectedValue(v as u64))
            }
        })
    }
}
/// Derive-macro back end for enums: discriminant-tagged serialize/deserialize;
/// no `write_size`/`version` (enums carry no struct header here).
pub fn rmc_serialize_enum(
    enum_data: &DataEnum,
    derive_input: &DeriveInput,
) -> (
    proc_macro2::TokenStream,
    proc_macro2::TokenStream,
    Option<proc_macro2::TokenStream>,
    Option<proc_macro2::TokenStream>,
) {
    // `#[repr(..)]` names the integer type used for the on-wire discriminant.
    // Compare the ident directly instead of allocating with `to_string()`.
    let repr_attr = derive_input.attrs.iter().find(|a| {
        a.path().segments.len() == 1
            && a.path()
                .segments
                .first()
                .is_some_and(|p| p.ident == "repr")
    });
    let Some(repr_attr) = repr_attr else {
        panic!("missing repr attribute");
    };
    let ty: Ident = repr_attr.parse_args().unwrap();
    // `enum_data` is already a reference; the previous `&enum_data` was a
    // needless double borrow
    let serialize = rmc_generate_serialize_enum(enum_data, &ty);
    let deserialize = rmc_generate_deserialize_enum(enum_data, &ty);
    (serialize, deserialize, None, None)
}

View file

@ -1,10 +0,0 @@
use proc_macro2::TokenStream;
use quote::ToTokens;
// todo: return a wrapper struct implementing ToTokens over the iterator instead as to avoid unnescesary allocations with the token stream
/// Concatenates every item's token representation into a single `TokenStream`.
pub fn fold_tokenable<T: ToTokens>(list: impl Iterator<Item = T>) -> TokenStream {
    let mut combined = TokenStream::new();
    for item in list {
        item.to_tokens(&mut combined);
    }
    combined
}

View file

@ -1,11 +0,0 @@
[package]
name = "proxy-common"
version = "0.1.0"
edition = "2024"
[dependencies]
thiserror = "2.0.12"
rnex-core = { path = "../rnex-core", version = "0.1.1" }
tokio = { version = "1.47.0", features = ["full"] }
log = "0.4.25"
hex = "0.4.3"

View file

@ -1,218 +0,0 @@
use log::{error, info};
use rnex_core::{
PID,
executables::common::try_get_ip,
prudp::{socket_addr::PRUDPSockAddr, virtual_port::VirtualPort},
reggie::{RemoteEdgeNodeHolder, UnitPacketWrite},
rmc::{
protocols::{
RemoteDisconnectable, RmcCallable, RmcConnection, RmcPureRemoteObject,
new_rmc_gateway_connection,
},
structures::RmcSerialize,
},
rnex_proxy_common::ConnectionInitData,
util::{SendingBufferConnection, SplittableBufferConnection},
};
use std::{
env::{self, VarError},
error,
net::{AddrParseError, Ipv4Addr, SocketAddr, SocketAddrV4},
ops::Deref,
panic,
str::FromStr,
sync::{Arc, LazyLock},
};
use thiserror::Error;
use tokio::net::TcpStream;
// Base port, parsed at compile time from the build environment.
// `from_str_radix` is used because `str::parse` is not callable in const
// context; a bad value fails the build via the `panic!` arm.
const RNEX_DEFAULT_PORT: u16 = match u16::from_str_radix(env!("RNEX_DEFAULT_PORT"), 10) {
    Ok(v) => v,
    Err(_) => panic!("unable to get default port from env"),
};
// Access key baked into the binary at compile time from the build env.
// (`'static` is implied on const references — clippy `redundant_static_lifetimes`.)
pub const RNEX_ACCESS_KEY: &str = env!("RNEX_ACCESS_KEY");
/// Startup/configuration errors for the proxy executables.
#[derive(Error, Debug)]
pub enum Error {
    #[error("error getting environment variable \"{0}\": {1}")]
    UnableToGetEnv(&'static str, VarError),
    #[error("error parsing ip address environment variable \"{0}\": {1}")]
    AddrParse(&'static str, AddrParseError),
    // both the env-var path and the internet lookup failed; both causes kept
    #[error(
        "error error getting public ip address: \n\tattempted to read from env var \"SERVER_IP_PUBLIC\" and got: {0} \n\tattempted to request from internet and failed with: {1}"
    )]
    PubAddrGetErr(Box<Self>, Box<dyn error::Error>),
}
impl Into<Error> for (&'static str, AddrParseError) {
fn into(self) -> Error {
Error::AddrParse(self.0, self.1)
}
}
/// Resolved configuration a proxy instance starts with.
pub struct ProxyStartupParam {
    // where incoming traffic is forwarded to
    pub forward_destination: SocketAddr,
    // address of the edge-node holder service
    pub edge_node_holder: SocketAddr,
    // address peers reach us at (public) vs. the local bind address (private)
    pub self_public: SocketAddrV4,
    pub self_private: SocketAddrV4,
    pub virtual_port: VirtualPort,
}
fn try_get_env<T: FromStr>(name: &'static str) -> Result<T, Error>
where
(&'static str, T::Err): Into<Error>,
{
T::from_str(&env::var(name).map_err(|e| Error::UnableToGetEnv(name, e))?)
.map_err(|e| (name, e).into())
}
/// Which of the two proxy roles is being started; selects the listening port
/// offset and the PRUDP virtual port in ProxyStartupParam::new.
pub enum ProxyType {
    Insecure,
    Secure,
}
// These must be `static`, not `const`: a `const` LazyLock is inlined at every
// use site, so each dereference would create and lazily initialize a brand
// new lock instead of sharing one instance
// (clippy::declare_interior_mutable_const / borrow_interior_mutable_const).
// Usage (`*VIRTUAL_PORT_INSECURE`, and the test module's imports) is
// unchanged.
static VIRTUAL_PORT_INSECURE: LazyLock<VirtualPort> =
    LazyLock::new(|| VirtualPort::parse(env!("RNEX_VIRTUAL_PORT_INSECURE")).unwrap());
static VIRTUAL_PORT_SECURE: LazyLock<VirtualPort> =
    LazyLock::new(|| VirtualPort::parse(env!("RNEX_VIRTUAL_PORT_SECURE")).unwrap());
impl ProxyStartupParam {
    /// Gathers all proxy startup parameters from the environment.
    ///
    /// Ports: the insecure proxy uses RNEX_DEFAULT_PORT, the secure proxy
    /// RNEX_DEFAULT_PORT + 1. SERVER_IP_PRIVATE is optional and defaults to
    /// 0.0.0.0 on that port; SERVER_IP_PUBLIC is optional and falls back to an
    /// online lookup via try_get_ip, reusing the private port.
    /// FORWARD_DESTINATION and EDGE_NODE_HOLDER are hard requirements.
    #[inline(always)]
    pub fn new(prox_ty: ProxyType) -> Result<Self, Error> {
        let port = RNEX_DEFAULT_PORT
            + match prox_ty {
                ProxyType::Insecure => 0,
                ProxyType::Secure => 1,
            };
        // A missing or malformed SERVER_IP_PRIVATE silently falls back to the
        // wildcard address rather than erroring out.
        let self_private = try_get_env("SERVER_IP_PRIVATE")
            .unwrap_or(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, port));
        let self_public: SocketAddrV4 = match try_get_env("SERVER_IP_PUBLIC") {
            Ok(v) => v,
            // If the online lookup also fails, report both errors together.
            Err(e) => try_get_ip()
                .map(|v| SocketAddrV4::new(v, self_private.port()))
                .map_err(move |v| Error::PubAddrGetErr(Box::new(e), v))?,
        };
        Ok(Self {
            forward_destination: try_get_env("FORWARD_DESTINATION")?,
            edge_node_holder: try_get_env("EDGE_NODE_HOLDER")?,
            self_private,
            self_public,
            virtual_port: match prox_ty {
                ProxyType::Insecure => *VIRTUAL_PORT_INSECURE,
                ProxyType::Secure => *VIRTUAL_PORT_SECURE,
            },
        })
    }
}
/// Wrapper around a remote RMC object that fires a callback when it is
/// dropped. The callback sits in an Option so Drop (which only has &mut self)
/// can move the FnOnce out and call it exactly once.
struct OnRemoteDrop<T: RemoteDisconnectable, C: FnOnce() + Send + Sync + 'static>(T, Option<C>);
impl<T: RemoteDisconnectable, C: FnOnce() + Send + Sync + 'static> Deref for OnRemoteDrop<T, C> {
    type Target = T;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
// The Option<C> wrapper is what lets us keep C an FnOnce: Drop takes the
// callback out with `take` and invokes it once.
impl<T: RemoteDisconnectable + RmcPureRemoteObject, C: FnOnce() + Send + Sync + 'static>
    OnRemoteDrop<T, C>
{
    /// Wraps the remote object constructed from `conn`; `drop_func` runs when
    /// this wrapper is dropped.
    pub fn new(conn: RmcConnection, drop_func: C) -> Self {
        Self(T::new(conn), Some(drop_func))
    }
    /// Proxies a disconnect request to the wrapped remote object.
    #[allow(dead_code)]
    pub async fn disconnect(&self) {
        self.0.disconnect().await;
    }
}
impl<T: RemoteDisconnectable, C: FnOnce() + Send + Sync + 'static> RmcCallable
    for OnRemoteDrop<T, C>
{
    /// Incoming RMC calls are ignored: this wrapper only exposes a remote
    /// object and does not itself serve any protocol methods.
    fn rmc_call(
        &self,
        _responder: &SendingBufferConnection,
        _protocol_id: u16,
        _method_id: u32,
        _call_id: u32,
        _rest: Vec<u8>,
    ) -> impl Future<Output = ()> + Send {
        // maybe respond with not implemented or something
        async {}
    }
}
impl<T: RemoteDisconnectable, C: FnOnce() + Send + Sync + 'static> Drop for OnRemoteDrop<T, C> {
    /// Runs the shutdown callback exactly once when the wrapper is dropped.
    fn drop(&mut self) {
        // `if let` instead of `unwrap`: panicking inside drop during an
        // unwind aborts the whole process, so never panic here.
        if let Some(callback) = self.1.take() {
            callback();
        }
    }
}
pub async fn setup_edge_node_connection(
param: &ProxyStartupParam,
shutdown_callback: impl FnOnce() + Send + Sync + 'static,
) {
let conn = tokio::net::TcpStream::connect(&param.edge_node_holder)
.await
.unwrap();
let conn: SplittableBufferConnection = conn.into();
conn.send(
rnex_core::reggie::EdgeNodeHolderConnectOption::Register(param.self_public)
.to_data()
.unwrap(),
)
.await;
println!("{:?}", param.self_public);
//leave the inner object floating so that it gets destroyed once we disconnect
new_rmc_gateway_connection(conn, move |r| {
Arc::new(OnRemoteDrop::<RemoteEdgeNodeHolder, _>::new(
r,
shutdown_callback,
))
});
}
/// Opens a TCP connection to the backend and sends the connection
/// establishment data (client PRUDP address + PID) over it.
///
/// Returns `None` (after logging the cause) if either the connect or the
/// handshake write fails.
pub async fn new_backend_connection(
    param: &ProxyStartupParam,
    addr: PRUDPSockAddr,
    pid: PID,
) -> Option<SplittableBufferConnection> {
    info!("attempting to connect to: {}", param.forward_destination);
    let mut stream = match TcpStream::connect(param.forward_destination).await {
        Ok(stream) => stream,
        Err(e) => {
            error!("unable to establish connection to backend: {}", e);
            return None;
        }
    };
    let init = ConnectionInitData {
        prudpsock_addr: addr,
        pid,
    };
    let payload = init.to_data().unwrap();
    match stream.send_buffer(&payload).await {
        Ok(_) => Some(stream.into()),
        Err(e) => {
            error!("unable to send establishment data to backend: {}", e);
            None
        }
    }
}
#[cfg(test)]
mod test {
    use crate::{VIRTUAL_PORT_INSECURE, VIRTUAL_PORT_SECURE};
    // Forces both compile-time virtual-port strings through
    // VirtualPort::parse, so a bad RNEX_VIRTUAL_PORT_* build-time value fails
    // in `cargo test` instead of panicking at proxy startup.
    #[test]
    fn test_virtual_port_correct() {
        println!("{:?}", VIRTUAL_PORT_INSECURE);
        println!("{:?}", VIRTUAL_PORT_SECURE);
    }
}

View file

@ -1,30 +0,0 @@
[package]
name = "proxy"
version = "0.1.0"
edition = "2024"
[dependencies]
tokio = { version = "1.47.0", features = ["full"] }
prudpv0 = { path = "../prudpv0", optional = true }
prudpv1 = { path = "../prudpv1", optional = true }
prudplite = { path = "../prudplite", optional = true }
proxy-common = { path = "../proxy-common" }
cfg-if = "1.0.4"
rnex-core = { path = "../rnex-core", version = "0.1.1" }
log = "0.4.25"
[features]
prudpv0 = ["dep:prudpv0"]
prudpv1 = ["dep:prudpv1"]
prudplite = ["dep:prudplite"]
friends = ["prudpv0", "prudpv0/friends"]
splatoon = ["prudpv1"]
[[bin]]
name = "proxy_insecure"
path = "src/insecure.rs"
[[bin]]
name = "proxy_secure"
path = "src/secure.rs"

View file

@ -1,15 +0,0 @@
use proxy::edge_node_dc_callback;
use proxy_common::{ProxyStartupParam, setup_edge_node_connection};
use rnex_core::common::setup;
/// Entry point for the insecure proxy binary: shared setup, read startup
/// params from the environment, register with the edge-node holder, then run
/// the proxy loop.
#[tokio::main]
async fn main() {
    // Shared process-wide setup from rnex_core (contents not visible here).
    setup();
    let param = ProxyStartupParam::new(proxy_common::ProxyType::Insecure)
        .expect("unable to get startup parameters");
    // If the holder link ever drops, edge_node_dc_callback aborts the process.
    setup_edge_node_connection(&param, edge_node_dc_callback).await;
    proxy::start_insecure(param).await;
}

View file

@ -1,21 +0,0 @@
use std::process::abort;
use cfg_if::cfg_if;
use log::error;
cfg_if! {
if #[cfg(feature = "prudpv0")]{
pub use prudpv0::*;
} else if #[cfg(feature = "prudpv1")] {
pub use prudpv1::*;
} else if #[cfg(feature = "prudplite")]{
pub use prudplite::*;
} else {
compile_error!("no proxy type has been set");
}
}
/// Invoked when the RMC gateway to the edge-node holder is torn down.
/// The proxy cannot serve clients without its holder registration, so the
/// process is aborted and left to the orchestrator to restart.
pub fn edge_node_dc_callback() {
    error!("disconnected from node holder, aborting!");
    abort()
}

View file

@ -1,14 +0,0 @@
use proxy::edge_node_dc_callback;
use proxy_common::{ProxyStartupParam, setup_edge_node_connection};
use rnex_core::common::setup;
/// Entry point for the secure proxy binary: identical to the insecure binary
/// except for the ProxyType, which selects port and virtual port.
#[tokio::main]
async fn main() {
    // Shared process-wide setup from rnex_core (contents not visible here).
    setup();
    let param = ProxyStartupParam::new(proxy_common::ProxyType::Secure)
        .expect("unable to get startup parameters");
    // If the holder link ever drops, edge_node_dc_callback aborts the process.
    setup_edge_node_connection(&param, edge_node_dc_callback).await;
    proxy::start_secure(param).await;
}

View file

@ -1,18 +0,0 @@
[package]
name = "prudplite"
version = "0.1.0"
edition = "2024"
[features]
nx = []
v4-3-11 = []
[dependencies]
rnex-core = { path = "../rnex-core", version = "0.1.1" }
tokio = { version = "1.47.0", features = ["full"] }
bytemuck = { version = "1.23.1", features = ["derive"] }
proxy-common = {path = "../proxy-common"}
tokio-tungstenite = {version = "0.28.0", features = ["rustls", "rustls-tls-native-roots"]}
log = "0.4.25"
futures-util = "0.3.31"
v-byte-helpers = { git = "https://github.com/RusticMaple/VByteMacros", version = "0.1.1" }

View file

@ -1,14 +0,0 @@
use rnex_core::PID;
use crate::crypto::Crypto;
/// "Crypto" for the insecure port: no credentials are exchanged.
pub struct Insecure;
impl Crypto for Insecure {
    /// Accepts any connection payload without validation.
    // NOTE(review): hard-codes PID 100 for every client and returns an empty
    // connection response — presumably a placeholder/guest id; confirm.
    fn new_connection(&self, _data: &[u8]) -> Option<(PID, Vec<u8>)> {
        Some((100, vec![]))
    }
    fn new() -> Self {
        Self
    }
}

View file

@ -1,9 +0,0 @@
use rnex_core::PID;
pub mod insecure;
pub mod secure;
/// Connection-establishment strategy for the prudplite proxy: validates the
/// CONNECT payload and yields the client's PID plus the response payload.
pub trait Crypto: 'static + Send + Sync {
    /// Returns None to reject the connection, or Some((pid, response_payload))
    /// to accept it.
    fn new_connection(&self, data: &[u8]) -> Option<(PID, Vec<u8>)>;
    fn new() -> Self;
}

View file

@ -1,27 +0,0 @@
use rnex_core::{
PID, executables::common::SECURE_SERVER_ACCOUNT, nex::account::Account,
prudp::ticket::read_secure_connection_data, rmc::structures::RmcSerialize,
};
use crate::crypto::Crypto;
/// Crypto for the secure port: validates the client's secure connection data
/// against the secure server account and echoes check value + 1.
pub struct Secure(&'static Account);
impl Crypto for Secure {
    fn new_connection(&self, data: &[u8]) -> Option<(PID, Vec<u8>)> {
        let (_, pid, check_value) = read_secure_connection_data(data, &self.0)?;
        // Protocol echo: respond with the received check value plus one.
        // NOTE(review): plain `+ 1` will panic in debug builds on a maximal
        // check value from a hostile client — confirm the value's type and
        // whether wrapping semantics are intended.
        let check_value_response = check_value + 1;
        let data = bytemuck::bytes_of(&check_value_response);
        let mut response = Vec::new();
        data.serialize(&mut response).ok()?;
        Some((pid, response))
    }
    fn new() -> Self {
        Self(&SECURE_SERVER_ACCOUNT)
    }
}

View file

@ -1,45 +0,0 @@
use futures_util::{SinkExt, StreamExt};
use rnex_core::prudp::types_flags::{TypesFlags, flags::NEED_ACK, types::SYN};
use tokio_tungstenite::tungstenite::{Message, client::IntoClientRequest, http::header};
use crate::packet::{LiteHeader, LitePacket, PacketSpecificData, StreamTypes, create_packet_from};
mod packet;
// Hard-coded probe target. NOTE(review): KEY appears unused in this snippet —
// verify before removing.
const KEY: &str = "4eb18d39";
const URL: &str = "wss://g2DF33D01-lp1.s.n.srv.nintendo.net";
/// Manual PRUDP-Lite handshake probe: opens the websocket, sends a SYN with
/// NEED_ACK and prints whatever header comes back. Debug tool, not
/// production code — unwraps freely.
#[tokio::main]
async fn main() {
    let login = URL.into_client_request().unwrap();
    let (mut stream, response) = tokio_tungstenite::connect_async(login).await.unwrap();
    println!("response: {:?}", response);
    let packet = create_packet_from(
        LiteHeader {
            stream_types: StreamTypes::new(10, 10),
            source_port: 1,
            destination_port: 1,
            fragment_id: 0,
            types_flags: TypesFlags::default().types(SYN).flags(NEED_ACK),
            sequence_id: 0,
            ..Default::default()
        },
        &[PacketSpecificData::SupportedFunctions(0x8)],
        &[],
    );
    println!("sending ack");
    stream.send(Message::Binary(packet.into())).await.unwrap();
    println!("waiting for response");
    let packet = stream.next().await.unwrap();
    let Message::Binary(packet) = packet.unwrap() else {
        panic!()
    };
    let packet = LitePacket::new(packet);
    let header = packet.header().unwrap();
    println!("{:?}", header);
}

View file

@ -1,316 +0,0 @@
pub mod crypto;
mod packet;
use std::{collections::HashMap, net::SocketAddr, sync::Arc};
use crate::{
crypto::{Crypto, insecure::Insecure, secure::Secure},
packet::{LiteHeader, LitePacket, PacketSpecificData, StreamTypes, create_packet_from},
};
use futures_util::{SinkExt, StreamExt};
use log::{error, info, warn};
use proxy_common::{ProxyStartupParam, new_backend_connection};
use rnex_core::{
PID,
prudp::{
socket_addr::PRUDPSockAddr,
types_flags::{
TypesFlags,
flags::{ACK, NEED_ACK, RELIABLE},
types::{CONNECT, DATA, DISCONNECT, PING, SYN},
},
virtual_port::VirtualPort,
},
util::SplittableBufferConnection,
};
use tokio::net::{TcpListener, TcpStream};
use tokio_tungstenite::{
WebSocketStream,
tungstenite::{Bytes, Message},
};
/// All state for one established prudplite client connection.
struct ConnectionState {
    param: Arc<ProxyStartupParam>,
    /// Cleared to tear the connection down (e.g. on reliable-packet spam).
    active: bool,
    websocket: WebSocketStream<TcpStream>,
    #[allow(dead_code)]
    pid: PID,
    /// TCP link to the backend this client's payloads are forwarded over.
    backend_conn: SplittableBufferConnection,
    /// Client's PRUDP address (socket address + virtual port).
    addr: PRUDPSockAddr,
    /// Out-of-order reliable packets parked by sequence id.
    incoming_reliable: HashMap<u16, LitePacket<Bytes>>,
    /// Next expected reliable sequence id from the client.
    client_reliable_counter: u16,
    #[allow(dead_code)]
    server_reliable_counter: u16,
}
impl ConnectionState {
    /// Handles one PRUDP-Lite packet from the client.
    ///
    /// `sorted` is true when the packet is being replayed from the
    /// out-of-order buffer and must skip re-buffering.
    pub async fn handle_incoming_prudp(&mut self, packet: LitePacket<Bytes>, sorted: bool) {
        let Some(header) = packet.header() else {
            warn!("invalid data on connection");
            return;
        };
        // Ack anything that asks for it; TCP to the backend already gives us
        // delivery guarantees, so the ack is purely protocol bookkeeping.
        if (header.types_flags.get_flags() & NEED_ACK) != 0 {
            let data = create_packet_from(
                LiteHeader {
                    stream_types: StreamTypes::new(
                        self.param.virtual_port.get_stream_type(),
                        self.addr.virtual_port.get_stream_type(),
                    ),
                    source_port: self.param.virtual_port.get_port_number(),
                    destination_port: self.addr.virtual_port.get_port_number(),
                    fragment_id: header.fragment_id,
                    types_flags: TypesFlags::default()
                        .types(header.types_flags.get_types())
                        .flags(ACK),
                    sequence_id: header.sequence_id,
                    ..Default::default()
                },
                &[],
                &[],
            );
            let data: Bytes = data.into();
            // NOTE(review): DISCONNECT is acked three times in total (twice
            // here plus the unconditional send below) — presumably matching
            // PRUDP's repeated disconnect-ack convention; confirm.
            if header.types_flags.get_types() == DISCONNECT {
                self.websocket
                    .send(Message::Binary(data.clone()))
                    .await
                    .ok();
                self.websocket
                    .send(Message::Binary(data.clone()))
                    .await
                    .ok();
            }
            self.websocket.send(Message::Binary(data)).await.ok();
        }
        if (header.types_flags.get_flags() & ACK) != 0 {
            // we can just safely ignore acks, we ARE sending over tcp after all already guarantees that our packets will arrive
            // we can however not guarantee the order of incoming client packets so we should still take care of that
            // (the client might be doing some funny things which we dont know of)
            return;
        }
        // Park out-of-order reliable packets until their sequence id is due;
        // more than 5 parked packets deactivates the connection.
        // NOTE(review): nothing in this impl calls process_reliable, so parked
        // packets are never replayed — verify against the rest of the crate.
        if (header.types_flags.get_flags() & RELIABLE != 0) & !sorted {
            self.incoming_reliable.insert(header.sequence_id, packet);
            if self.incoming_reliable.len() > 5 {
                self.active = false;
                warn!("client is spamming out of order reliable packets, throwing out");
            }
            return;
        }
        match header.types_flags.get_types() {
            DATA => {
                if header.fragment_id != 0 {
                    warn!("fragmented packets arent yet supported");
                    return;
                }
                let Some(payload) = packet.payload() else {
                    return;
                };
                // Forward the raw payload to the backend over TCP.
                self.backend_conn.send(payload.into()).await;
            }
            PING => {}
            v => {
                info!("unimplemented packet type: {}", v);
            }
        }
    }
    /// Replays parked reliable packets whose sequence id has become due.
    #[allow(dead_code)]
    pub async fn process_reliable(&mut self) {
        while let Some(v) = self.incoming_reliable.remove(&self.client_reliable_counter) {
            self.handle_incoming_prudp(v, true).await;
            self.client_reliable_counter += 1;
        }
    }
    /// Main per-connection loop: multiplexes the client websocket and the
    /// backend TCP connection until the connection goes inactive or the
    /// client disconnects.
    pub async fn handle_connection(&mut self) {
        while self.active {
            tokio::select! {
                v = self.websocket.next() => {
                    match v {
                        Some(Ok(Message::Binary(v))) => {
                            self.handle_incoming_prudp(LitePacket::new(v), false).await;
                        }
                        _ => {
                            info!("client disconnected or errored out");
                            return;
                        }
                    }
                }
                _ = self.backend_conn.recv() => {
                    // NOTE(review): data received from the backend is read and
                    // discarded here — backend-to-client forwarding appears to
                    // be unimplemented; confirm intent.
                }
            }
        }
    }
}
/// Handles a freshly-accepted TCP connection: upgrades it to a websocket,
/// answers SYN, validates CONNECT via the Crypto implementation, connects to
/// the backend and then runs the established-connection loop.
/// Any malformed or unexpected traffic before CONNECT drops the client.
pub async fn websocket_thread_unconnected<C: Crypto>(
    param: Arc<ProxyStartupParam>,
    crypto: Arc<C>,
    conn: TcpStream,
    addr: SocketAddr,
) {
    let mut websocket = match tokio_tungstenite::accept_async(conn).await {
        Ok(v) => v,
        Err(e) => {
            error!("error accepting websocket connection: {}", e);
            return;
        }
    };
    while let Some(Ok(v)) = websocket.next().await {
        match v {
            Message::Binary(b) => {
                let packet = LitePacket::new(b);
                let Some(header) = packet.header() else {
                    error!("got malformed message, disconnecting");
                    return;
                };
                match header.types_flags.get_types() {
                    SYN => {
                        // SYN ack: echo the client's supported functions
                        // (masked to the low byte) plus a zeroed connection
                        // signature.
                        let Some(supported) = packet.packet_specific_iter() else {
                            error!("got malformed message, disconnecting");
                            return;
                        };
                        let Some(PacketSpecificData::SupportedFunctions(s)) = supported
                            .into_iter()
                            .find(|v| matches!(v, PacketSpecificData::SupportedFunctions(_)))
                        else {
                            error!("got malformed message, disconnecting");
                            return;
                        };
                        let data = create_packet_from(
                            LiteHeader {
                                destination_port: header.source_port,
                                source_port: param.virtual_port.get_port_number(),
                                stream_types: StreamTypes::new(
                                    param.virtual_port.get_stream_type(),
                                    header.stream_types.source(),
                                ),
                                fragment_id: 0,
                                sequence_id: 0,
                                types_flags: TypesFlags::default().types(SYN).flags(ACK),
                                ..Default::default()
                            },
                            &[
                                PacketSpecificData::SupportedFunctions(s & 0xFF),
                                PacketSpecificData::ConnectionSignature([0; 16]),
                            ],
                            &[],
                        );
                        websocket.send(Message::Binary(data.into())).await.ok();
                    }
                    CONNECT => {
                        let Some(supported) = packet.packet_specific_iter() else {
                            error!("got malformed message, disconnecting");
                            return;
                        };
                        let Some(PacketSpecificData::SupportedFunctions(s)) = supported
                            .into_iter()
                            .find(|v| matches!(v, PacketSpecificData::SupportedFunctions(_)))
                        else {
                            error!("got malformed message, disconnecting");
                            return;
                        };
                        let Some(data) = packet.payload() else {
                            error!("got malformed message, disconnecting");
                            return;
                        };
                        // Credential validation is delegated to the Crypto
                        // implementation (a no-op on the insecure port).
                        let Some((pid, data)) = crypto.new_connection(data) else {
                            error!("invalid login data");
                            return;
                        };
                        let data = create_packet_from(
                            LiteHeader {
                                destination_port: header.source_port,
                                source_port: param.virtual_port.get_port_number(),
                                stream_types: StreamTypes::new(
                                    param.virtual_port.get_stream_type(),
                                    header.stream_types.source(),
                                ),
                                fragment_id: 0,
                                sequence_id: 0,
                                types_flags: TypesFlags::default().types(CONNECT).flags(ACK),
                                ..Default::default()
                            },
                            &[
                                PacketSpecificData::SupportedFunctions(s & 0xFF),
                                PacketSpecificData::ConnectionSignature([0; 16]),
                            ],
                            &data,
                        );
                        websocket.send(Message::Binary(data.into())).await.ok();
                        let addr = PRUDPSockAddr::new(
                            addr,
                            VirtualPort::new(header.source_port, header.stream_types.source()),
                        );
                        let Some(backend_conn) = new_backend_connection(&param, addr, pid).await
                        else {
                            error!("unable to connect to backend");
                            return;
                        };
                        // NOTE(review): reliable counters start at 2 (client)
                        // and 1 (server) — presumably the handshake consumes
                        // the earlier sequence ids; confirm against protocol.
                        let mut connection = ConnectionState {
                            active: true,
                            addr,
                            pid,
                            backend_conn,
                            client_reliable_counter: 2,
                            server_reliable_counter: 1,
                            param,
                            incoming_reliable: HashMap::new(),
                            websocket,
                        };
                        connection.handle_connection().await;
                        break;
                    }
                    v => {
                        error!(
                            "invalid packet type for unconnected client {}, disconnecting",
                            v,
                        );
                    }
                }
            }
            v => {
                error!("non binary message({:?}) , disconnecting", v);
                return;
            }
        }
    }
}
/// Binds the websocket listener on the private address and spawns one handler
/// task per accepted TCP connection; runs until accept() fails.
pub async fn start_proxy<C: Crypto>(param: ProxyStartupParam) {
    let param = Arc::new(param);
    let crypto = Arc::new(C::new());
    let listener = TcpListener::bind(param.self_private)
        .await
        .expect("unable to bind to port");
    loop {
        let Ok((connection, addr)) = listener.accept().await else {
            break;
        };
        tokio::spawn(websocket_thread_unconnected(
            param.clone(),
            crypto.clone(),
            connection,
            addr,
        ));
    }
}
/// Starts the secure-port proxy (credential-validating crypto).
pub async fn start_secure(param: ProxyStartupParam) {
    start_proxy::<Secure>(param).await;
}
/// Starts the insecure-port proxy (no credential validation).
pub async fn start_insecure(param: ProxyStartupParam) {
    start_proxy::<Insecure>(param).await;
}

View file

@ -1,222 +0,0 @@
use std::{
fmt::Debug,
io::{self, Cursor, Read, Write},
};
use bytemuck::{Pod, Zeroable, bytes_of_mut};
use rnex_core::prudp::types_flags::TypesFlags;
use v_byte_helpers::{IS_BIG_ENDIAN, ReadExtensions};
/// Wire header of a PRUDP-Lite packet, read/written in place via bytemuck
/// (the Pod derive guarantees the struct has no padding).
#[derive(Pod, Zeroable, Copy, Clone, Default, Debug)]
#[repr(C)]
pub struct LiteHeader {
    /// Always 0x80 for lite packets (stamped by create_packet_from).
    pub magic: u8,
    /// Byte length of the packet-specific options following the header.
    pub packet_specific_length: u8,
    /// Byte length of the payload after the options.
    pub payload_size: u16,
    /// Packed source/destination stream types.
    pub stream_types: StreamTypes,
    pub source_port: u8,
    pub destination_port: u8,
    pub fragment_id: u8,
    pub types_flags: TypesFlags,
    pub sequence_id: u16,
}
/// A decoded packet-specific option from the region between header and
/// payload; each is serialized as (id, size, value).
pub enum PacketSpecificData {
    /// Option id 0, 4 bytes: supported-functions bitmask.
    SupportedFunctions(u32),
    /// Option id 1, 16 bytes.
    ConnectionSignature([u8; 16]),
    /// Option id 0x80, 16 bytes.
    LiteSignature([u8; 16]),
}
impl PacketSpecificData {
    /// Reads one (id, size, value) option from the stream; errors on unknown
    /// ids or a size that does not match the id.
    fn consume(reader: &mut impl Read) -> io::Result<Self> {
        let mut option_id = 0u8;
        reader.read_exact(bytes_of_mut(&mut option_id))?;
        let mut size = 0u8;
        reader.read_exact(bytes_of_mut(&mut size))?;
        match option_id {
            0 => {
                if size != 4 {
                    Err(io::Error::other(
                        "invalid option size for supported functions",
                    ))
                } else {
                    Ok(Self::SupportedFunctions(reader.read_le_u32()?))
                }
            }
            1 => {
                if size != 16 {
                    Err(io::Error::other(
                        "invalid option size for connection signature",
                    ))
                } else {
                    Ok(Self::ConnectionSignature(
                        reader.read_struct(IS_BIG_ENDIAN)?,
                    ))
                }
            }
            0x80 => {
                if size != 16 {
                    Err(io::Error::other("invalid option size for lite signature"))
                } else {
                    Ok(Self::LiteSignature(reader.read_struct(IS_BIG_ENDIAN)?))
                }
            }
            _ => Err(io::Error::other("invalid option id")),
        }
    }
    /// Serialized size: 2 bytes of (id, size) plus the value bytes.
    fn write_size(&self) -> usize {
        2 + match self {
            PacketSpecificData::SupportedFunctions(_) => 4,
            Self::ConnectionSignature(_) => 16,
            Self::LiteSignature(_) => 16,
        }
    }
    /// Writes the option in the same (id, size, value) layout consume reads.
    fn write_self(&self, writer: &mut impl Write) -> io::Result<()> {
        match self {
            PacketSpecificData::SupportedFunctions(v) => {
                writer.write_all(&[0, 4])?;
                writer.write_all(&v.to_le_bytes())?;
            }
            Self::ConnectionSignature(v) => {
                writer.write_all(&[1, 16])?;
                writer.write_all(&v[..])?;
            }
            Self::LiteSignature(v) => {
                writer.write_all(&[0x80, 16])?;
                writer.write_all(&v[..])?;
            }
        }
        Ok(())
    }
}
pub struct LitePacket<T: AsRef<[u8]>>(T);
/// Iterator over decoded packet-specific options; stops at the first
/// malformed entry (decode errors are folded into None).
pub struct PacketSpecificIter<'a>(Cursor<&'a [u8]>);
impl<'a> Iterator for PacketSpecificIter<'a> {
    type Item = PacketSpecificData;
    fn next(&mut self) -> Option<Self::Item> {
        PacketSpecificData::consume(&mut self.0).ok()
    }
}
impl<T: AsRef<[u8]>> LitePacket<T> {
    pub fn new(inner: T) -> Self {
        Self(inner)
    }
    /// Borrows the fixed header, or None if the buffer is too short.
    pub fn header(&self) -> Option<&LiteHeader> {
        bytemuck::try_from_bytes(self.0.as_ref().get(..size_of::<LiteHeader>())?).ok()
    }
    pub fn header_mut(&mut self) -> Option<&mut LiteHeader>
    where
        T: AsMut<[u8]>,
    {
        bytemuck::try_from_bytes_mut(self.0.as_mut().get_mut(..size_of::<LiteHeader>())?).ok()
    }
    /// Bytes after the packet-specific options; the options length comes from
    /// the header, so a lying header yields None rather than a panic.
    pub fn payload(&self) -> Option<&[u8]> {
        let header = self.header()?;
        self.0
            .as_ref()
            .get(size_of::<LiteHeader>() + header.packet_specific_length as usize..)
    }
    pub fn payload_mut(&mut self) -> Option<&mut [u8]>
    where
        T: AsMut<[u8]>,
    {
        let len = self.header()?.packet_specific_length;
        self.0
            .as_mut()
            .get_mut(size_of::<LiteHeader>() + len as usize..)
    }
    /// Raw bytes of the packet-specific options region.
    pub fn packet_specific_raw(&self) -> Option<&[u8]> {
        let header = self.header()?;
        self.0.as_ref().get(
            size_of::<LiteHeader>()
                ..size_of::<LiteHeader>() + header.packet_specific_length as usize,
        )
    }
    pub fn packet_specific_raw_mut(&mut self) -> Option<&mut [u8]>
    where
        T: AsMut<[u8]>,
    {
        let len = self.header()?.packet_specific_length;
        self.0
            .as_mut()
            .get_mut(size_of::<LiteHeader>()..size_of::<LiteHeader>() + len as usize)
    }
    /// Iterates the decoded packet-specific options.
    pub fn packet_specific_iter<'a>(&'a self) -> Option<PacketSpecificIter<'a>> {
        self.packet_specific_raw()
            .map(Cursor::new)
            .map(PacketSpecificIter)
    }
}
/// Builds a complete lite packet: sizes the buffer exactly, stamps the magic,
/// option length and payload size over `header`, then writes the options and
/// payload. The `expect`s can only fire if the size arithmetic here is wrong.
pub fn create_packet_from(
    header: LiteHeader,
    specific_data: &[PacketSpecificData],
    data: &[u8],
) -> Vec<u8> {
    let specific_size: usize = specific_data.iter().map(|v| v.write_size()).sum();
    let mut packet = LitePacket::new(vec![
        0u8;
        size_of::<LiteHeader>() + specific_size + data.len()
    ]);
    *packet.header_mut().expect("packet malformed in creation") = LiteHeader {
        magic: 0x80,
        packet_specific_length: specific_size as u8,
        payload_size: data.len() as u16,
        ..header
    };
    let mut cursor = Cursor::new(
        packet
            .packet_specific_raw_mut()
            .expect("packet malformed in creation"),
    );
    for specific in specific_data {
        specific.write_self(&mut cursor).unwrap();
    }
    packet
        .payload_mut()
        .expect("packet malformed in creation")
        .copy_from_slice(data);
    packet.0
}
/// Packed source/destination stream-type nibbles: source in the high nibble,
/// destination in the low nibble.
#[derive(Pod, Zeroable, Copy, Clone, Default)]
#[repr(transparent)]
pub struct StreamTypes(u8);
impl Debug for StreamTypes {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "({},{})", self.source(), self.destination())
    }
}
impl StreamTypes {
    /// Packs the two 4-bit stream types into one byte.
    ///
    /// Fixed: the previous expression `(source_stream & 0xF << 4) & dest_stream & 0xF`
    /// was broken twice over — `<<` binds tighter than `&`, so the source was
    /// masked with 0xF0 instead of being shifted, and the two halves were
    /// combined with AND instead of OR. The result never round-tripped
    /// through `source()`/`destination()`.
    pub fn new(source_stream: u8, dest_stream: u8) -> Self {
        Self(((source_stream & 0xF) << 4) | (dest_stream & 0xF))
    }
    /// High nibble: the sender's stream type.
    pub fn source(&self) -> u8 {
        self.0 >> 4
    }
    /// Low nibble: the receiver's stream type.
    pub fn destination(&self) -> u8 {
        self.0 & 0xF
    }
}

View file

@ -1,20 +0,0 @@
[package]
name = "prudpv0"
version = "0.1.0"
edition = "2024"
[dependencies]
rnex-core = { path = "../rnex-core", version = "0.1.1" }
tokio = { version = "1.47.0", features = ["full"] }
bytemuck = { version = "1.23.1", features = ["derive"] }
typenum = "1.18.0"
rc4 = "0.1.0"
log = "0.4.25"
cfg-if = "1.0.4"
proxy-common = {path = "../proxy-common"}
hmac = "0.12.1"
md-5 = "^0.10.6"
[features]
prudpv0 = []
friends = ["prudpv0"]

View file

@ -1,54 +0,0 @@
/// Wrapping-sum helpers used by the legacy PRUDP v0 checksum.
trait IterExtra: Iterator {
    fn sum_wrapping_u8(&mut self) -> u8
    where
        Self::Item: Into<u8>;
    fn sum_wrapping_u32(&mut self) -> u32
    where
        Self::Item: Into<u32>;
}

impl<T: Iterator> IterExtra for T {
    /// Sums all remaining items modulo 2^8.
    fn sum_wrapping_u8(&mut self) -> u8
    where
        Self::Item: Into<u8>,
    {
        self.by_ref().map(Into::into).fold(0u8, u8::wrapping_add)
    }

    /// Sums all remaining items modulo 2^32.
    fn sum_wrapping_u32(&mut self) -> u32
    where
        Self::Item: Into<u32>,
    {
        self.by_ref().map(Into::into).fold(0u32, u32::wrapping_add)
    }
}
/// Computes the legacy PRUDP v0 checksum byte: the wrapping byte sum of the
/// access key, the trailing non-word-aligned bytes of `data`, and the
/// native-endian bytes of the wrapping u32 sum of the leading 4-byte words
/// of `data`.
#[inline(always)]
pub fn common_checksum(access_key: &str, data: &[u8]) -> u8 {
    let leftover = data.len() % 4;
    let (words, tail) = data.split_at(data.len() - leftover);
    // Decode words with from_ne_bytes instead of bytemuck::cast_slice:
    // cast_slice panics when the byte slice is not 4-byte aligned, which an
    // arbitrary &[u8] (e.g. a subslice into a packet buffer) need not be.
    // The computed values are identical for aligned input.
    let word_sum = words
        .chunks_exact(4)
        .map(|w| u32::from_ne_bytes(w.try_into().unwrap()))
        .fold(0u32, u32::wrapping_add);
    let mut checksum = access_key.bytes().fold(0u8, u8::wrapping_add);
    checksum = tail.iter().fold(checksum, |acc, &b| acc.wrapping_add(b));
    word_sum
        .to_ne_bytes()
        .into_iter()
        .fold(checksum, u8::wrapping_add)
}

View file

@ -1,6 +0,0 @@
use hmac::Hmac;
use md5::Md5;
use proxy_common::RNEX_ACCESS_KEY;
/// Compile-time NEX access key shared by the friends insecure/secure crypto.
pub const ACCESS_KEY: &str = RNEX_ACCESS_KEY;
/// HMAC-MD5, used for PRUDP v0 DATA packet signatures.
pub type HmacMd5 = Hmac<Md5>;

View file

@ -1,79 +0,0 @@
use std::io::Write;
use hmac::Mac;
use md5::{Digest, Md5};
use rc4::{KeyInit, Rc4, StreamCipher};
use rnex_core::prudp::{
encryption::{DEFAULT_KEY, EncryptionPair},
types_flags::{TypesFlags, types::DATA},
};
use typenum::U5;
use crate::crypto::{
Crypto, CryptoInstance,
common_crypto::common_checksum,
friends_common::{ACCESS_KEY, HmacMd5},
};
/// Per-connection state for the friends insecure port: RC4 stream-cipher pair
/// keyed with the crate's default key, plus the handshake signatures.
pub struct InsecureInstance {
    pair: EncryptionPair<Rc4<U5>>,
    self_signat: [u8; 4],
    #[allow(dead_code)]
    remote_signat: [u8; 4],
}
impl CryptoInstance for InsecureInstance {
    fn decrypt_incoming(&mut self, data: &mut [u8]) {
        self.pair.recv.apply_keystream(data);
    }
    fn encrypt_outgoing(&mut self, data: &mut [u8]) {
        self.pair.send.apply_keystream(data);
    }
    /// The insecure port has no authenticated user.
    fn get_user_id(&self) -> u32 {
        0
    }
    /// Non-empty DATA packets are signed with HMAC-MD5 keyed on
    /// MD5(access key); empty DATA packets get a fixed magic; everything else
    /// echoes the signature established at handshake time.
    fn generate_signature(&self, types_flags: TypesFlags, data: &[u8]) -> [u8; 4] {
        if types_flags.get_types() == DATA {
            if data.len() == 0 {
                [0x78, 0x56, 0x34, 0x12]
            } else {
                let mut hash = Md5::new();
                // Feeds the digest through io::Write; Digest::update would be
                // the more conventional call and should behave the same.
                hash.write(ACCESS_KEY.as_bytes()).unwrap();
                let mut hmac = <HmacMd5 as Mac>::new_from_slice(&hash.finalize().as_slice())
                    .expect("unable to create hmac md5");
                hmac.update(data);
                hmac.finalize().into_bytes()[0..4].try_into().unwrap()
            }
        } else {
            self.self_signat
        }
    }
}
/// Factory for the friends insecure port: no credential validation; RC4 is
/// keyed with the crate's DEFAULT_KEY.
pub struct Insecure();
impl Crypto for Insecure {
    type Instance = InsecureInstance;
    fn new() -> Self {
        Self()
    }
    fn calculate_checksum(&self, data: &[u8]) -> u8 {
        common_checksum(ACCESS_KEY, data)
    }
    /// Never rejects; the connect-response payload is empty on this port.
    fn instantiate(
        &self,
        _packet_data: &[u8],
        self_signat: [u8; 4],
        remote_signat: [u8; 4],
    ) -> Option<(Self::Instance, Vec<u8>)> {
        Some((
            InsecureInstance {
                pair: EncryptionPair::init_both(|| Rc4::new(&DEFAULT_KEY)),
                self_signat,
                remote_signat,
            },
            vec![],
        ))
    }
}

View file

@ -1,97 +0,0 @@
use hmac::Mac;
use md5::{Digest, Md5};
use rc4::{KeyInit, Rc4, StreamCipher};
use rnex_core::{
executables::common::SECURE_SERVER_ACCOUNT,
nex::account::Account,
prudp::{
encryption::EncryptionPair,
ticket::read_secure_connection_data,
types_flags::{TypesFlags, types::DATA},
},
rmc::structures::RmcSerialize,
};
use std::io::Write;
use typenum::U16;
use crate::crypto::{
Crypto, CryptoInstance,
common_crypto::common_checksum,
friends_common::{ACCESS_KEY, HmacMd5},
};
/// Per-connection state for the friends secure port: RC4 stream-cipher pair
/// keyed with the session key from the client's connection data, plus the
/// authenticated user id and handshake signatures.
pub struct SecureInstance {
    pair: EncryptionPair<Rc4<U16>>,
    uid: u32,
    self_signat: [u8; 4],
    #[allow(dead_code)]
    remote_signat: [u8; 4],
}
impl CryptoInstance for SecureInstance {
    fn decrypt_incoming(&mut self, data: &mut [u8]) {
        self.pair.recv.apply_keystream(data);
    }
    fn encrypt_outgoing(&mut self, data: &mut [u8]) {
        self.pair.send.apply_keystream(data);
    }
    /// PID extracted from the validated secure connection data.
    fn get_user_id(&self) -> u32 {
        self.uid
    }
    /// Same signature scheme as the insecure port: HMAC-MD5 keyed on
    /// MD5(access key) for non-empty DATA, fixed magic for empty DATA,
    /// handshake signature otherwise.
    fn generate_signature(&self, types_flags: TypesFlags, data: &[u8]) -> [u8; 4] {
        if types_flags.get_types() == DATA {
            if data.len() == 0 {
                [0x78, 0x56, 0x34, 0x12]
            } else {
                let mut hash = Md5::new();
                // Feeds the digest through io::Write; Digest::update would be
                // the more conventional call and should behave the same.
                hash.write(ACCESS_KEY.as_bytes()).unwrap();
                let mut hmac = <HmacMd5 as Mac>::new_from_slice(&hash.finalize().as_slice())
                    .expect("unable to create hmac md5");
                hmac.update(data);
                hmac.finalize().into_bytes()[0..4].try_into().unwrap()
            }
        } else {
            self.self_signat
        }
    }
}
/// Factory for the friends secure port: validates the client's secure
/// connection data against the secure server account.
pub struct Secure(&'static Account);
impl Crypto for Secure {
    type Instance = SecureInstance;
    fn new() -> Self {
        Self(&SECURE_SERVER_ACCOUNT)
    }
    fn calculate_checksum(&self, data: &[u8]) -> u8 {
        common_checksum(ACCESS_KEY, data)
    }
    /// Rejects (None) when the secure connection data does not validate.
    /// On success the response payload echoes check value + 1 and RC4 is
    /// keyed with the extracted session key.
    fn instantiate(
        &self,
        data: &[u8],
        self_signat: [u8; 4],
        remote_signat: [u8; 4],
    ) -> Option<(Self::Instance, Vec<u8>)> {
        let (session_key, pid, check_value) = read_secure_connection_data(data, &self.0)?;
        // NOTE(review): plain `+ 1` can panic in debug builds on a maximal
        // check value from a hostile client — confirm whether wrapping
        // semantics are intended.
        let check_value_response = check_value + 1;
        let data = bytemuck::bytes_of(&check_value_response);
        let mut response = Vec::new();
        data.serialize(&mut response).ok()?;
        Some((
            SecureInstance {
                pair: EncryptionPair::init_both(|| {
                    Rc4::new_from_slice(&session_key).expect("unable to initialize rc4 stream")
                }),
                self_signat,
                remote_signat,
                uid: pid,
            },
            response,
        ))
    }
}

View file

@ -1,9 +0,0 @@
use crate::crypto::Crypto;
pub struct Insecure();
impl Crypto for Insecure {
fn calculate_checksum(&self, data: &[u8]) -> u8 {
todo!()
}
}

View file

@ -1,38 +0,0 @@
use cfg_if::cfg_if;
use rnex_core::prudp::types_flags::TypesFlags;
mod common_crypto;
/// Per-connection crypto state: symmetric stream ciphers plus packet
/// signature generation.
pub trait CryptoInstance: Send + 'static {
    fn decrypt_incoming(&mut self, data: &mut [u8]);
    fn encrypt_outgoing(&mut self, data: &mut [u8]);
    /// Computes the 4-byte signature carried in the v0 header for a packet
    /// with the given types/flags and payload.
    fn generate_signature(&self, types_flags: TypesFlags, data: &[u8]) -> [u8; 4];
    fn get_user_id(&self) -> u32;
}
/// Server-wide crypto factory: validates packet checksums and creates one
/// CryptoInstance (plus a connect-response payload) per client connection.
pub trait Crypto: Send + Sync + 'static {
    type Instance: CryptoInstance;
    fn new() -> Self;
    /// Computes the trailing checksum byte over a serialized packet.
    fn calculate_checksum(&self, data: &[u8]) -> u8;
    /// Returns None to reject the connection.
    fn instantiate(
        &self,
        data: &[u8],
        self_signat: [u8; 4],
        remote_signat: [u8; 4],
    ) -> Option<(Self::Instance, Vec<u8>)>;
}
cfg_if! {
if #[cfg(feature = "friends")]{
pub mod friends_common;
pub mod friends_insecure;
pub use friends_insecure::*;
pub mod friends_secure;
pub use friends_secure::*;
} else {
pub mod secure;
pub use secure::*;
pub mod insecure;
pub use insecure::*;
}
}

View file

@ -1,9 +0,0 @@
use crate::crypto::Crypto;
/// Stub secure crypto for the non-friends prudpv0 build.
pub struct Secure();
impl Crypto for Secure {
    // TODO: unimplemented — panics if a packet checksum is ever computed.
    fn calculate_checksum(&self, data: &[u8]) -> u8 {
        todo!()
    }
}

View file

@ -1,50 +0,0 @@
use cfg_if::cfg_if;
cfg_if! {
    if #[cfg(feature = "prudpv0")] {
        use log::info;
        use proxy_common::ProxyStartupParam;
        use std::env;
        use std::net::SocketAddrV4;
        use std::sync::{Arc, LazyLock};
        use crate::crypto::{Crypto, Insecure, Secure};
        use crate::server::Server;
        mod crypto;
        mod packet;
        mod server;
        // NOTE(review): these two statics duplicate fields already resolved
        // by ProxyStartupParam and are not referenced in this file — verify
        // they are still consumed elsewhere in the crate.
        pub static EDGE_NODE_HOLDER: LazyLock<SocketAddrV4> = LazyLock::new(|| {
            env::var("EDGE_NODE_HOLDER")
                .ok()
                .and_then(|s| s.parse().ok())
                .expect("EDGE_NODE_HOLDER not set")
        });
        pub static FORWARD_DESTINATION: LazyLock<SocketAddrV4> = LazyLock::new(|| {
            env::var("FORWARD_DESTINATION")
                .ok()
                .and_then(|s| s.parse().ok())
                .expect("FORWARD_DESTINATION not set")
        });
        // Same as with prudpv1, this is responsible for handling the
        // different cryptography implementations, e.g. secure and insecure
        // (this also includes special cases like friends).
        /// Generic entry point: builds the server with the chosen crypto and
        /// runs its packet loop.
        async fn start_proxy<T: Crypto>(param: ProxyStartupParam) {
            info!("binding to socket");
            let server: Arc<Server<T>> = Arc::new(Server::new(param).await);
            info!("waiting on packets");
            server.run_task().await;
        }
        pub async fn start_secure(param: ProxyStartupParam) {
            start_proxy::<Secure>(param).await;
        }
        pub async fn start_insecure(param: ProxyStartupParam) {
            start_proxy::<Insecure>(param).await;
        }
    }
}

View file

@ -1,405 +0,0 @@
use bytemuck::{Pod, Zeroable, try_from_bytes, try_from_bytes_mut};
use log::{info, warn};
use rnex_core::prudp::{
types_flags::{
TypesFlags,
flags::HAS_SIZE,
types::{CONNECT, DATA, DISCONNECT, PING, SYN},
},
virtual_port::VirtualPort,
};
use crate::crypto::{Crypto, CryptoInstance};
/// PRUDP v0 wire header, read/written in place via bytemuck.
/// `packed` prevents any padding so the struct matches the wire layout
/// byte-for-byte.
#[repr(C, packed)]
#[derive(Clone, Copy, Pod, Zeroable, Debug)]
pub struct PRUDPV0Header {
    pub source: VirtualPort,
    pub destination: VirtualPort,
    pub type_flags: TypesFlags,
    pub session_id: u8,
    pub packet_signature: [u8; 4],
    pub sequence_id: u16,
}
/// Zero-copy view over a raw PRUDP v0 datagram:
/// header | type-specific fields | optional size field | payload | checksum byte.
#[repr(transparent)]
pub struct PRUDPV0Packet<T: AsRef<[u8]>>(pub T);
impl<T: AsRef<[u8]>> PRUDPV0Packet<T> {
#[inline(always)]
pub fn get_packet_specific_size(&self) -> Option<usize> {
Some(get_types_flags_size_from_types_flags(
self.header()?.type_flags,
))
}
#[inline(always)]
pub fn header(&self) -> Option<&PRUDPV0Header> {
try_from_bytes(self.0.as_ref().get(..size_of::<PRUDPV0Header>())?).ok()
}
#[inline(always)]
pub fn header_mut(&mut self) -> Option<&mut PRUDPV0Header>
where
T: AsMut<[u8]>,
{
try_from_bytes_mut(self.0.as_mut().get_mut(..size_of::<PRUDPV0Header>())?).ok()
}
#[inline(always)]
pub fn connection_signature(&self) -> Option<&[u8; 4]> {
let offset = size_of::<PRUDPV0Header>();
Some(self.0.as_ref().get(offset..offset + 4)?.try_into().ok()?)
}
#[inline(always)]
pub fn connection_signature_mut(&mut self) -> Option<&mut [u8; 4]>
where
T: AsMut<[u8]>,
{
let offset = size_of::<PRUDPV0Header>();
Some(
self.0
.as_mut()
.get_mut(offset..offset + 4)?
.try_into()
.ok()?,
)
}
#[inline(always)]
pub fn size_mut(&mut self) -> Option<&mut [u8]>
where
T: AsMut<[u8]>,
{
if self.header()?.type_flags.get_flags() & HAS_SIZE == 0 {
return None;
}
let offset = size_of::<PRUDPV0Header>() + get_type_specific_size(self.header()?.type_flags);
Some(self.0.as_mut().get_mut(offset..offset + 2)?)
}
#[inline(always)]
pub fn fragment_id_mut(&mut self) -> Option<&mut u8>
where
T: AsMut<[u8]>,
{
if self.header()?.type_flags.get_types() != DATA {
return None;
}
let offset = size_of::<PRUDPV0Header>();
Some(self.0.as_mut().get_mut(offset)?)
}
#[inline(always)]
pub fn fragment_id(&self) -> Option<&u8> {
if self.header()?.type_flags.get_types() != DATA {
return None;
}
let offset = size_of::<PRUDPV0Header>();
Some(self.0.as_ref().get(offset)?)
}
#[inline(always)]
fn get_payload_offset(&self) -> Option<usize> {
Some(size_of::<PRUDPV0Header>() + self.get_packet_specific_size()?)
}
#[inline(always)]
pub fn payload(&self) -> Option<&[u8]> {
self.0
.as_ref()
.get(self.get_payload_offset()?..(self.0.as_ref().len().saturating_sub(1)))
}
#[inline(always)]
pub fn payload_mut(&mut self) -> Option<&mut [u8]>
where
T: AsMut<[u8]>,
{
let start_offset = self.get_payload_offset()?;
let end_offset = self.0.as_ref().len().saturating_sub(1);
self.0.as_mut().get_mut(start_offset..end_offset)
}
#[inline(always)]
pub fn checksummed_data(&self) -> Option<&[u8]> {
self.0
.as_ref()
.get(..self.0.as_ref().len().saturating_sub(1))
}
    /// The checksum byte stored at the very end of the packet, if any.
    #[inline(always)]
    pub fn checksum(&self) -> Option<u8> {
        self.0.as_ref().last().copied()
    }
    /// Mutable reference to the checksum byte at the very end of the packet.
    #[inline(always)]
    pub fn checksum_mut(&mut self) -> Option<&mut u8>
    where
        T: AsMut<[u8]>,
    {
        self.0.as_mut().last_mut()
    }
#[inline(always)]
pub fn check_checksum(&self, crypto: &impl Crypto) -> bool {
let Some(data) = self.checksummed_data() else {
return false;
};
let Some(checksum) = self.checksum() else {
return false;
};
if checksum != crypto.calculate_checksum(data) {
warn!(
"checksum doesnt match expected checksum: {} != {}",
checksum,
crypto.calculate_checksum(data)
)
}
checksum == crypto.calculate_checksum(data)
}
    /// Wraps a raw byte buffer as a PRUDP v0 packet view. No validation is
    /// performed; the accessors return `None` on malformed buffers.
    pub fn new(data: T) -> Self {
        Self(data)
    }
}
// Fixed placeholder written into the header's packet-signature field of SYN
// packets (see `new_syn_packet`), before any real signature is negotiated.
const DEFAULT_SIGNAT: [u8; 4] = [0x12, 0x34, 0x56, 0x78];
#[inline(always)]
#[allow(dead_code)]
const fn get_size_offset(tf: TypesFlags) -> usize {
size_of::<PRUDPV0Header>()
+ (if tf.get_types() & (SYN | CONNECT) != 0 {
4
} else if tf.get_types() & DATA != 0 {
1
} else {
0
})
}
/// Size of the type-specific field: SYN/CONNECT carry a 4-byte connection
/// signature, DATA a 1-byte fragment id; other types have none.
#[inline(always)]
const fn get_type_specific_size(tf: TypesFlags) -> usize {
    let types = tf.get_types();
    if types == SYN || types == CONNECT {
        4
    } else if types & DATA != 0 {
        1
    } else {
        0
    }
}
/// Total size of the variable header section: the type-specific field plus
/// the optional 2-byte size field (present when `HAS_SIZE` is set).
#[inline(always)]
const fn get_types_flags_size_from_types_flags(tf: TypesFlags) -> usize {
    let size_field = if tf.get_flags() & HAS_SIZE != 0 { 2 } else { 0 };
    get_type_specific_size(tf) + size_field
}
/// Total on-wire size of a v0 packet: fixed header, variable header section,
/// payload, and the trailing checksum byte (+1).
#[inline(always)]
pub const fn precalc_size(tf: TypesFlags, payload_size: usize) -> usize {
    size_of::<PRUDPV0Header>() + get_types_flags_size_from_types_flags(tf) + payload_size + 1
}
/// Builds a complete SYN packet ready to send: header (with the fixed
/// `DEFAULT_SIGNAT` placeholder as packet signature), the given connection
/// signature, and the trailing checksum.
pub fn new_syn_packet(
    flags: u16,
    source: VirtualPort,
    destination: VirtualPort,
    signat: [u8; 4],
    crypto: &impl Crypto,
) -> Vec<u8> {
    let type_flags = TypesFlags::default().types(SYN).flags(flags);
    // Allocate the exact final size up front; the accessors below index
    // into this buffer based on `type_flags`.
    let vec = vec![0; precalc_size(type_flags, 0)];
    let mut packet = PRUDPV0Packet::new(vec);
    let header = packet.header_mut().expect("packet malformed in creation");
    *header = PRUDPV0Header {
        destination,
        source,
        packet_signature: DEFAULT_SIGNAT,
        sequence_id: 0,
        session_id: 0,
        type_flags,
    };
    *packet
        .connection_signature_mut()
        .expect("packet malformed in creation") = signat;
    // The checksum covers every preceding byte, so it must be written last.
    *packet.checksum_mut().expect("packet malformed in creation") = crypto.calculate_checksum(
        packet
            .checksummed_data()
            .expect("packet malformed in creation"),
    );
    packet.0
}
/// Builds a complete CONNECT packet: header (sequence id fixed at 1, signed
/// with our own signature), the remote connection signature, the payload,
/// the optional size field, and the trailing checksum.
pub fn new_connect_packet(
    flags: u16,
    source: VirtualPort,
    destination: VirtualPort,
    self_signat: [u8; 4],
    remote_signat: [u8; 4],
    session_id: u8,
    data: &[u8],
    crypto: &impl Crypto,
) -> Vec<u8> {
    let type_flags = TypesFlags::default().types(CONNECT).flags(flags);
    // Allocate the exact final size up front.
    let vec = vec![0; precalc_size(type_flags, data.len())];
    let mut packet = PRUDPV0Packet::new(vec);
    let header = packet.header_mut().expect("packet malformed in creation");
    *header = PRUDPV0Header {
        destination,
        source,
        packet_signature: self_signat,
        sequence_id: 1,
        session_id,
        type_flags,
    };
    *packet
        .connection_signature_mut()
        .expect("packet malformed in creation") = remote_signat;
    packet
        .payload_mut()
        .expect("packet malformed in creation")
        .copy_from_slice(data);
    // The size field only exists when HAS_SIZE is set; skip silently
    // otherwise.
    if let Some(size) = packet.size_mut() {
        size.copy_from_slice(&(data.len() as u16).to_le_bytes());
    }
    // Checksum covers every preceding byte, so it is written last.
    *packet.checksum_mut().expect("packet malformed in creation") = crypto.calculate_checksum(
        packet
            .checksummed_data()
            .expect("packet malformed in creation"),
    );
    info!("header: {:?}", packet.header());
    packet.0
}
/// Builds a complete DATA packet: payload is encrypted in place, then signed,
/// then the full header, fragment id, optional size field, and checksum are
/// written.
///
/// The header is written in two stages on purpose: `type_flags` must be set
/// first so `payload_mut`/`size_mut`/`fragment_id_mut` can compute their
/// offsets, and the full header can only be written once the signature over
/// the encrypted payload has been generated.
pub fn new_data_packet(
    flags: u16,
    source: VirtualPort,
    destination: VirtualPort,
    data: &[u8],
    sequence_id: u16,
    session_id: u8,
    frag_id: u8,
    crypto_instance: &mut impl CryptoInstance,
    crypto: &impl Crypto,
) -> Vec<u8> {
    let type_flags = TypesFlags::default().types(DATA).flags(flags);
    let vec = vec![0; precalc_size(type_flags, data.len())];
    let mut packet = PRUDPV0Packet::new(vec);
    // Stage 1: only the type flags, to make the offset helpers work.
    packet
        .header_mut()
        .expect("packet malformed in creation")
        .type_flags = type_flags;
    packet
        .payload_mut()
        .expect("packet malformed in creation")
        .copy_from_slice(data);
    // Encrypt in place before signing — the signature covers ciphertext.
    crypto_instance.encrypt_outgoing(packet.payload_mut().expect("packet malformed in creation"));
    if let Some(size) = packet.size_mut() {
        size.copy_from_slice(&(data.len() as u16).to_le_bytes());
    }
    *packet
        .fragment_id_mut()
        .expect("packet malformed in creation") = frag_id;
    let packet_signature = crypto_instance.generate_signature(
        type_flags,
        packet.payload().expect("packet malformed in creation"),
    );
    // Stage 2: the complete header, now that the signature is known.
    let header = packet.header_mut().expect("packet malformed in creation");
    *header = PRUDPV0Header {
        destination,
        source,
        packet_signature,
        sequence_id,
        session_id,
        type_flags,
    };
    // Checksum covers every preceding byte, so it is written last.
    *packet.checksum_mut().expect("packet malformed in creation") = crypto.calculate_checksum(
        packet
            .checksummed_data()
            .expect("packet malformed in creation"),
    );
    info!("header: {:?}", packet.header());
    packet.0
}
/// Builds a complete PING packet (empty payload): signed header followed by
/// the trailing checksum.
pub fn new_ping_packet(
    flags: u16,
    source: VirtualPort,
    destination: VirtualPort,
    sequence_id: u16,
    session_id: u8,
    crypto_instance: &mut impl CryptoInstance,
    crypto: &impl Crypto,
) -> Vec<u8> {
    let type_flags = TypesFlags::default().types(PING).flags(flags);
    let vec = vec![0; precalc_size(type_flags, 0)];
    let mut packet = PRUDPV0Packet::new(vec);
    // PING carries no payload, so the signature is over an empty slice.
    let packet_signature = crypto_instance.generate_signature(type_flags, &[]);
    let header = packet.header_mut().expect("packet malformed in creation");
    *header = PRUDPV0Header {
        destination,
        source,
        packet_signature,
        sequence_id,
        session_id,
        type_flags,
    };
    // Checksum covers every preceding byte, so it is written last.
    *packet.checksum_mut().expect("packet malformed in creation") = crypto.calculate_checksum(
        packet
            .checksummed_data()
            .expect("packet malformed in creation"),
    );
    packet.0
}
/// Builds a complete DISCONNECT packet (empty payload): signed header
/// followed by the trailing checksum.
pub fn new_disconnect_packet(
    flags: u16,
    source: VirtualPort,
    destination: VirtualPort,
    sequence_id: u16,
    session_id: u8,
    crypto_instance: &mut impl CryptoInstance,
    crypto: &impl Crypto,
) -> Vec<u8> {
    let type_flags = TypesFlags::default().types(DISCONNECT).flags(flags);
    let vec = vec![0; precalc_size(type_flags, 0)];
    let mut packet = PRUDPV0Packet::new(vec);
    // DISCONNECT carries no payload, so the signature is over an empty slice.
    let packet_signature = crypto_instance.generate_signature(type_flags, &[]);
    let header = packet.header_mut().expect("packet malformed in creation");
    *header = PRUDPV0Header {
        destination,
        source,
        packet_signature,
        sequence_id,
        session_id,
        type_flags,
    };
    // Checksum covers every preceding byte, so it is written last.
    *packet.checksum_mut().expect("packet malformed in creation") = crypto.calculate_checksum(
        packet
            .checksummed_data()
            .expect("packet malformed in creation"),
    );
    info!("header: {:?}", packet.header());
    packet.0
}

View file

@ -1,536 +0,0 @@
use std::{
collections::HashMap,
net::{SocketAddr, SocketAddrV4},
sync::{Arc, Weak},
time::Duration,
};
use log::{error, info, warn};
use proxy_common::{ProxyStartupParam, new_backend_connection};
use rnex_core::{
prudp::{
socket_addr::PRUDPSockAddr,
types_flags::{
flags::{ACK, NEED_ACK, RELIABLE},
types::{CONNECT, DATA, DISCONNECT, PING, SYN},
},
},
util::{SendingBufferConnection, SplittableBufferConnection},
};
use tokio::{
net::UdpSocket,
spawn,
sync::{Mutex, RwLock},
time::{Instant, sleep},
};
use crate::{
crypto::{Crypto, CryptoInstance},
packet::{
PRUDPV0Packet, new_connect_packet, new_data_packet, new_disconnect_packet, new_ping_packet,
new_syn_packet,
},
};
/// Mutable per-connection state, guarded by the `Mutex` in [`Connection`].
pub struct InternalConnection<C: CryptoInstance> {
    // Time the last packet was seen from this client; refreshed in
    // `process_packet`, checked by `timeout_thread`.
    last_action: Instant,
    crypto_instance: C,
    // Next sequence id to use for server -> client packets.
    server_packet_counter: u16,
    // Next client sequence id expected to be processed in order.
    client_packet_counter: u16,
    // Sent reliable packets awaiting acknowledgement.
    // NOTE(review): acks are currently ignored in `process_packet`, so
    // entries are inserted here but never removed — confirm this map cannot
    // grow without bound.
    unacknowledged_packets: HashMap<u16, Arc<Vec<u8>>>,
    // Out-of-order incoming DATA packets buffered until their sequence id
    // comes up.
    packet_queue: HashMap<u16, (Instant, PRUDPV0Packet<Vec<u8>>)>,
}
/// One PRUDP client connection, keyed by `(addr, session_id)` in
/// `Server::connections`.
pub struct Connection<C: CryptoInstance> {
    session_id: u8,
    // Channel towards the backend for decrypted client payloads.
    target: SendingBufferConnection,
    addr: PRUDPSockAddr,
    // All mutable state lives behind this lock.
    inner: Mutex<InternalConnection<C>>,
}
impl<C: CryptoInstance> InternalConnection<C> {
    /// Returns the current server-side sequence number and advances the
    /// counter, wrapping around at `u16::MAX`.
    #[allow(dead_code)]
    fn next_server_count(&mut self) -> u16 {
        let prev_val = self.server_packet_counter;
        // `wrapping_add` states the intent directly instead of discarding
        // the overflow flag of `overflowing_add`.
        self.server_packet_counter = self.server_packet_counter.wrapping_add(1);
        prev_val
    }
}
/// PRUDP v0 proxy server: accepts packets on a UDP socket and relays
/// payloads to a backend per connection.
pub struct Server<C: Crypto> {
    param: ProxyStartupParam,
    socket: UdpSocket,
    crypto: C,
    // Active connections, keyed by (client socket address, session id).
    connections: RwLock<HashMap<(PRUDPSockAddr, u8), Arc<Connection<C::Instance>>>>,
}
impl<C: Crypto> Server<C> {
async fn send_data_packet(self: Arc<Self>, conn: Arc<Connection<C::Instance>>, data: &[u8]) {
/*let type_flags = TypesFlags::default().types(DATA).flags(HAS_SIZE | NEED_ACK);
let vec = vec![0; precalc_size(type_flags, data.len())];
let mut packet = PRUDPV0Packet::new(vec);
let payload = packet.payload_mut().expect("packet malformed in creation");
payload.copy_from_slice(data);
let mut inner = conn.inner.lock().await;
inner.crypto_instance.encrypt_outgoing(payload);
let packet_signat = inner.crypto_instance.generate_signature(payload);
let seq = inner.next_server_count();
*packet.header_mut().expect("packet malformed in creation") = PRUDPV0Header {
source: self.param.virtual_port,
destination: conn.addr.virtual_port,
type_flags,
session_id: conn.session_id,
packet_signature: packet_signat,
sequence_id: seq,
};
/* we leave the sequence id as is for now as it defaults to 0 */
packet.checksum_mut().expect("packet malformed in creation") =
self.crypto.calculate_checksum(
packet
.checksummed_data()
.expect("packet malformed in creation"),
);*/
let mut inner = conn.inner.lock().await;
let pieces = data.chunks(700);
let max_piece = pieces.len() - 1;
let mut frag_num = 1;
for (i, piece) in pieces.enumerate() {
let seq = inner.server_packet_counter;
let packet = new_data_packet(
NEED_ACK | RELIABLE,
(&self).param.virtual_port,
conn.addr.virtual_port,
piece,
inner.server_packet_counter,
conn.session_id,
if i == max_piece { 0 } else { frag_num },
&mut inner.crypto_instance,
&(&self).crypto,
);
inner.server_packet_counter += 1;
let packet = Arc::new(packet);
let packet_ref = Arc::downgrade(&packet);
inner.unacknowledged_packets.insert(seq, packet);
let conn = Arc::downgrade(&conn);
let this = Arc::downgrade(&self);
spawn(async move {
sleep(Duration::from_millis(i as u64 * 16)).await;
for n in 0..5 {
let Some(data) = packet_ref.upgrade() else {
return;
};
let Some(conn) = conn.upgrade() else {
return;
};
let Some(this) = this.upgrade() else {
return;
};
info!("send attempt {}", n);
this.socket
.send_to(&data, conn.addr.regular_socket_addr)
.await
.ok();
break;
}
});
frag_num += 1;
}
drop(inner);
}
async fn connection_thread(
self: Arc<Self>,
conn: Weak<Connection<C::Instance>>,
mut recv: SplittableBufferConnection,
) {
while let Some(data) = recv.recv().await {
let Some(conn) = conn.upgrade() else { break };
if &data[..] == &[0, 0, 0, 0, 0] {
info!("got keepalive");
continue;
}
info!("got data from server: {:?}", data);
self.clone().send_data_packet(conn.clone(), &data).await;
}
}
async fn timeout_thread(self: Arc<Self>, conn: Weak<Connection<C::Instance>>) {
loop {
let Some(conn) = conn.upgrade() else { break };
sleep(Duration::from_secs(3)).await;
let mut inner = conn.inner.lock().await;
if (Instant::now() - inner.last_action).as_secs() > 5 {
warn!("connection exceeded silence limit, sending ping");
let packet = new_ping_packet(
NEED_ACK,
self.param.virtual_port,
conn.addr.virtual_port,
0,
conn.session_id,
&mut inner.crypto_instance,
&self.crypto,
);
self.socket
.send_to(&packet, conn.addr.regular_socket_addr)
.await
.ok();
}
if (Instant::now() - inner.last_action).as_secs() > 15 {
warn!("client timed out...");
let packet = new_disconnect_packet(
NEED_ACK,
self.param.virtual_port,
conn.addr.virtual_port,
0,
conn.session_id,
&mut inner.crypto_instance,
&self.crypto,
);
self.socket
.send_to(&packet, conn.addr.regular_socket_addr)
.await
.ok();
self.socket
.send_to(&packet, conn.addr.regular_socket_addr)
.await
.ok();
self.socket
.send_to(&packet, conn.addr.regular_socket_addr)
.await
.ok();
drop(inner);
let mut conns = self.connections.write().await;
conns.remove(&(conn.addr, conn.session_id));
drop(conns);
break;
}
drop(inner);
}
}
async fn handle_syn(self: Arc<Self>, packet: PRUDPV0Packet<Vec<u8>>, addr: PRUDPSockAddr) {
info!("got syn");
let header = packet.header().unwrap();
let signat = addr.calculate_connection_signature();
let signat = [signat[0], signat[1], signat[2], signat[3]];
let packet = new_syn_packet(ACK, header.destination, header.source, signat, &self.crypto);
self.socket
.send_to(&packet, addr.regular_socket_addr)
.await
.ok();
}
    /// Handles a client CONNECT: instantiates the crypto state from the
    /// handshake payload, opens the backend connection, registers the
    /// connection, spawns its pump/timeout tasks, and replies with
    /// CONNECT|ACK.
    async fn handle_connect(self: Arc<Self>, packet: PRUDPV0Packet<Vec<u8>>, addr: PRUDPSockAddr) {
        let Some(data) = packet.payload() else {
            warn!("malformed packet from: {:?}", addr.regular_socket_addr);
            return;
        };
        // The client echoes back the signature we handed out in the SYN|ACK.
        let Some(self_signat) = packet.connection_signature().copied() else {
            warn!(
                "malformed packet(unable to find connection signature) from: {:?}",
                addr
            );
            return;
        };
        // Recompute the client's signature from its address (first 4 bytes).
        let remote_signat = addr.calculate_connection_signature();
        let remote_signat = [
            remote_signat[0],
            remote_signat[1],
            remote_signat[2],
            remote_signat[3],
        ];
        // `instantiate` validates the handshake and yields the per-connection
        // crypto state plus the response payload.
        let Some((ci, data)) = self.crypto.instantiate(&data, self_signat, remote_signat) else {
            warn!("unable to instantiate crypto instance");
            return;
        };
        let pid = ci.get_user_id();
        println!("user with pid {} is connecting", pid);
        let buf_conn = new_backend_connection(&self.param, addr, pid).await;
        let Some(buf_conn) = buf_conn else {
            error!("unable to connect to backend");
            return;
        };
        let header = packet.header().expect("header should be validated by now");
        let conn = Arc::new(Connection {
            target: buf_conn.duplicate_sender(),
            addr,
            session_id: header.session_id,
            inner: Mutex::new(InternalConnection {
                last_action: Instant::now(),
                crypto_instance: ci,
                // The CONNECT itself used client seq 1; expect 2 next.
                client_packet_counter: 2,
                server_packet_counter: 1,
                unacknowledged_packets: HashMap::new(),
                packet_queue: HashMap::new(),
            }),
        });
        let mut conns = self.connections.write().await;
        if conns.contains_key(&(addr, header.session_id)) {
            error!("client already connected but tried to connect again");
        }
        conns.insert((addr, header.session_id), conn.clone());
        drop(conns);
        // Backend -> client pump; holds only a weak ref so the connection
        // can be torn down independently.
        spawn({
            let this = self.clone();
            let conn = Arc::downgrade(&conn);
            this.connection_thread(conn, buf_conn)
        });
        // Inactivity watchdog for this connection.
        spawn({
            let this = self.clone();
            let conn = Arc::downgrade(&conn);
            this.timeout_thread(conn)
        });
        let packet = new_connect_packet(
            ACK,
            header.destination,
            header.source,
            self_signat,
            remote_signat,
            packet.header().unwrap().session_id,
            &data,
            &self.crypto,
        );
        info!("sending back connection accept");
        self.socket
            .send_to(&packet, addr.regular_socket_addr)
            .await
            .ok();
    }
    /// Handles an incoming DATA packet: acks it immediately, buffers it by
    /// sequence id, then drains the queue in order, decrypting each payload
    /// and forwarding it to the backend.
    async fn handle_data(self: Arc<Self>, packet: PRUDPV0Packet<Vec<u8>>, addr: PRUDPSockAddr) {
        let Some(frag_id) = packet.fragment_id() else {
            warn!("invalid packet from: {:?}", addr);
            return;
        };
        let Some(header) = packet.header() else {
            warn!("invalid packet from: {:?}", addr);
            return;
        };
        let Some(res) = self.get_connection((addr, header.session_id)).await else {
            warn!("data packet on inactive connection from: {:?}", addr);
            return;
        };
        info!("frag: {}", frag_id);
        let mut conn = res.inner.lock().await;
        // Ack before processing, echoing sequence and fragment ids.
        let ack = new_data_packet(
            ACK,
            self.param.virtual_port,
            res.addr.virtual_port,
            &[],
            header.sequence_id,
            header.session_id,
            *frag_id,
            &mut conn.crypto_instance,
            &self.crypto,
        );
        self.socket
            .send_to(&ack, addr.regular_socket_addr)
            .await
            .ok();
        // Buffer the packet; it may be out of order.
        conn.packet_queue.insert(
            packet.header().unwrap().sequence_id,
            (Instant::now(), packet),
        );
        // Drain as many in-order packets as are available, starting at the
        // expected client sequence id.
        while let Some((_, mut packet)) = {
            let ctr = conn.client_packet_counter;
            let packet = conn.packet_queue.remove(&ctr);
            packet
        } {
            info!("processing packet: {}", conn.client_packet_counter);
            let Some(payload) = packet.payload_mut() else {
                //todo: at this point the stream would have been broken, we should probably disconnect the client
                warn!("invalid packet from: {:?}", addr);
                return;
            };
            // RC4-style stream decryption mutates the payload in place.
            conn.crypto_instance.decrypt_incoming(payload);
            res.target.send(payload.to_owned()).await;
            conn.client_packet_counter += 1;
        }
        info!("finished handeling packets, dropping inner connection");
        drop(conn);
    }
async fn handle_ping(self: Arc<Self>, packet: PRUDPV0Packet<Vec<u8>>, addr: PRUDPSockAddr) {
info!("got ping");
let header = packet.header().unwrap();
let Some(conn) = self.get_connection((addr, header.session_id)).await else {
warn!("ping on inactive connection: {:?}", addr);
return;
};
let mut inner = conn.inner.lock().await;
let packet = new_ping_packet(
ACK,
self.param.virtual_port,
addr.virtual_port,
header.sequence_id,
header.session_id,
&mut inner.crypto_instance,
&self.crypto,
);
drop(inner);
self.socket
.send_to(&packet, addr.regular_socket_addr)
.await
.ok();
}
async fn handle_disconnect(
self: Arc<Self>,
packet: PRUDPV0Packet<Vec<u8>>,
addr: PRUDPSockAddr,
) {
info!("got disconnect");
let header = packet.header().unwrap();
let Some(conn) = self.get_connection((addr, header.session_id)).await else {
warn!("ping on inactive connection: {:?}", addr);
return;
};
let mut inner = conn.inner.lock().await;
let packet = new_disconnect_packet(
ACK,
self.param.virtual_port,
addr.virtual_port,
header.sequence_id,
header.session_id,
&mut inner.crypto_instance,
&self.crypto,
);
drop(inner);
let mut conns = self.connections.write().await;
conns.remove(&(addr, header.session_id));
drop(conns);
self.socket
.send_to(&packet, addr.regular_socket_addr)
.await
.ok();
self.socket
.send_to(&packet, addr.regular_socket_addr)
.await
.ok();
self.socket
.send_to(&packet, addr.regular_socket_addr)
.await
.ok();
}
async fn get_connection(
&self,
addr: (PRUDPSockAddr, u8),
) -> Option<Arc<Connection<C::Instance>>> {
let rd = self.connections.read().await;
let res = rd.get(&addr).cloned();
drop(rd);
res
}
    /// Entry point for every received datagram: verifies the checksum,
    /// refreshes the connection's activity timestamp, drops ACKs, and
    /// dispatches on the packet type.
    async fn process_packet(self: Arc<Self>, packet: PRUDPV0Packet<Vec<u8>>, addr: SocketAddrV4) {
        if !packet.check_checksum(&self.crypto) {
            warn!("invalid checksum from: {}", addr);
            return;
        }
        let Some(header) = packet.header() else {
            warn!("malformatted packet from: {}", addr);
            return;
        };
        info!("len: {}", packet.0.len());
        let addr = PRUDPSockAddr::new(SocketAddr::V4(addr), header.source);
        // Any valid packet counts as activity for the timeout watchdog.
        if let Some(conn) = self.get_connection((addr, header.session_id)).await {
            let mut inner = conn.inner.lock().await;
            inner.last_action = Instant::now();
            drop(inner);
        };
        if header.type_flags.get_flags() & ACK != 0 {
            info!("got ack(acks are ignored for now)");
            return;
        }
        println!("{:?}", header);
        match header.type_flags.get_types() {
            SYN => {
                self.handle_syn(packet, addr).await;
            }
            CONNECT => {
                self.handle_connect(packet, addr).await;
            }
            DATA => {
                self.handle_data(packet, addr).await;
            }
            PING => {
                self.handle_ping(packet, addr).await;
            }
            DISCONNECT => {
                self.handle_disconnect(packet, addr).await;
            }
            v => {
                println!("unimplemented packed type: {}", v);
            }
        }
    }
pub async fn run_task(self: Arc<Self>) {
loop {
let mut vec: Vec<u8> = vec![0u8; 65507];
let (len, addr) = match self.socket.recv_from(&mut vec).await {
Err(e) => {
error!("unable to recv: {}", e);
break;
}
Ok(v) => v,
};
let this = self.clone();
tokio::spawn(async move {
let mut data = vec;
data.resize(len, 0);
let packet = PRUDPV0Packet::new(data);
let SocketAddr::V4(addr) = addr else {
unreachable!()
};
this.process_packet(packet, addr).await;
});
}
}
    /// Binds the server's UDP socket on `param.self_private` and builds an
    /// empty connection table. Panics if the bind fails.
    pub async fn new(param: ProxyStartupParam) -> Self {
        let socket = UdpSocket::bind(param.self_private)
            .await
            .expect("unable to bind socket");
        Self {
            socket,
            crypto: C::new(),
            connections: RwLock::new(HashMap::new()),
            param,
        }
    }
}

View file

@ -1,23 +0,0 @@
[package]
name = "prudpv1"
version = "0.1.0"
edition = "2024"
[dependencies]
bytemuck = { version = "1.23.1", features = ["derive"] }
tokio = { version = "1.47.0", features = ["full"] }
hmac = "0.12.1"
md-5 = "^0.10.6"
rc4 = "0.1.0"
v-byte-helpers = { git = "https://github.com/RusticMaple/VByteMacros", version = "0.1.1" }
thiserror = "2.0.12"
log = "0.4.27"
async-trait = "0.1.88"
typenum = "1.18.0"
once_cell = "1.21.3"
rnex-core = { path = "../rnex-core", version = "0.1.1" }
proxy-common = {path = "../proxy-common"}
cfg-if = "1.0.4"
[features]
prudpv1 = []

View file

@ -1,2 +0,0 @@
pub mod proxy_insecure;
pub mod proxy_secure;

View file

@ -1,87 +0,0 @@
use crate::prudp::router::Router;
use crate::prudp::unsecure::Unsecure;
use log::error;
use proxy_common::{ProxyStartupParam, RNEX_ACCESS_KEY};
use rnex_core::prudp::virtual_port::VirtualPort;
use rnex_core::reggie::UnitPacketRead;
use rnex_core::reggie::UnitPacketWrite;
use rnex_core::rmc::structures::RmcSerialize;
use rnex_core::rnex_proxy_common::ConnectionInitData;
use std::time::Duration;
use tokio::net::TcpStream;
use tokio::task;
use tokio::time::sleep;
/// Runs the insecure proxy: accepts PRUDP connections on virtual port 1:10
/// and relays payloads to/from a TCP backend at `param.forward_destination`,
/// sending a 5-zero-byte keepalive to the client every 10 seconds.
pub async fn start(param: ProxyStartupParam) {
    let (router_secure, _) = Router::new(param.self_private)
        .await
        .expect("unable to start router");
    let mut socket_secure = router_secure
        .add_socket(VirtualPort::new(1, 10), Unsecure(RNEX_ACCESS_KEY))
        .await
        .expect("unable to add socket");
    loop {
        let Some(mut conn) = socket_secure.accept().await else {
            error!("server crashed");
            return;
        };
        task::spawn(async move {
            let mut stream = match TcpStream::connect(param.forward_destination).await {
                Ok(v) => v,
                Err(e) => {
                    error!("unable to connect: {}", e);
                    return;
                }
            };
            // Announce the new client to the backend before relaying.
            if let Err(e) = stream
                .send_buffer(
                    &ConnectionInitData {
                        prudpsock_addr: conn.socket_addr,
                        pid: conn.user_id,
                    }
                    .to_data()
                    .unwrap(),
                )
                .await
            {
                error!("error connecting to backend: {}", e);
                return;
            };
            loop {
                tokio::select! {
                    // Client -> backend.
                    data = conn.recv() => {
                        let Some(data) = data else {
                            return;
                        };
                        if let Err(e) = stream.send_buffer(&data[..]).await{
                            error!("error sending data to backend: {}", e);
                            return;
                        }
                    },
                    // Backend -> client.
                    data = stream.read_buffer() => {
                        let data = match data{
                            Ok(d) => d,
                            Err(e) => {
                                error!("error reveiving data from backend: {}", e);
                                return;
                            }
                        };
                        // `send` returns None once the connection is gone
                        // (`.is_none()` replaces the `== None` comparison).
                        if conn.send(data).await.is_none() {
                            return;
                        }
                    },
                    // Periodic keepalive towards the client.
                    _ = sleep(Duration::from_secs(10)) => {
                        conn.send([0,0,0,0,0].to_vec()).await;
                    }
                }
            }
        });
    }
}

View file

@ -1,110 +0,0 @@
use crate::prudp::router::Router;
use crate::prudp::secure::Secure;
use log::error;
use log::warn;
use proxy_common::{ProxyStartupParam, RNEX_ACCESS_KEY};
use rnex_core::executables::common::SECURE_SERVER_ACCOUNT;
use rnex_core::prudp::virtual_port::VirtualPort;
use rnex_core::reggie::UnitPacketRead;
use rnex_core::reggie::UnitPacketWrite;
use rnex_core::rmc::structures::RmcSerialize;
use rnex_core::rnex_proxy_common::ConnectionInitData;
use std::time::Duration;
use tokio::net::TcpStream;
use tokio::task;
use tokio::time::sleep;
/// Runs the secure proxy: like the insecure variant, but authenticates via
/// the secure-server account and rejects clients whose account level (via
/// the account gRPC service) is negative.
pub async fn start(param: ProxyStartupParam) {
    let (router_secure, _) = Router::new(param.self_private)
        .await
        .expect("unable to start router");
    let mut socket_secure = router_secure
        .add_socket(
            VirtualPort::new(1, 10),
            Secure(RNEX_ACCESS_KEY, SECURE_SERVER_ACCOUNT.clone()),
        )
        .await
        .expect("unable to add socket");
    loop {
        let Some(mut conn) = socket_secure.accept().await else {
            error!("server crashed");
            return;
        };
        task::spawn(async move {
            let Ok(mut c) = rnex_core::grpc::account::Client::new().await else {
                error!("failed to initialize gql client");
                return;
            };
            let v = match c.get_user_level(conn.user_id).await {
                Ok(v) => v,
                Err(e) => {
                    error!("failed to get user level: {}", e);
                    return;
                }
            };
            // Negative account levels are banned/blocked accounts.
            if v < 0 {
                warn!("person with too low account level joined");
                return;
            }
            let mut stream = match TcpStream::connect(param.forward_destination).await {
                Ok(v) => v,
                Err(e) => {
                    error!("unable to connect: {}", e);
                    return;
                }
            };
            // Announce the new client to the backend before relaying.
            if let Err(e) = stream
                .send_buffer(
                    &ConnectionInitData {
                        prudpsock_addr: conn.socket_addr,
                        pid: conn.user_id,
                    }
                    .to_data()
                    .unwrap(),
                )
                .await
            {
                error!("error connecting to backend: {}", e);
                return;
            };
            loop {
                tokio::select! {
                    // Client -> backend.
                    data = conn.recv() => {
                        let Some(data) = data else {
                            return;
                        };
                        if let Err(e) = stream.send_buffer(&data[..]).await{
                            error!("error sending data to backend: {}", e);
                            return;
                        }
                    },
                    // Backend -> client.
                    data = stream.read_buffer() => {
                        let data = match data{
                            Ok(d) => d,
                            Err(e) => {
                                error!("error reveiving data from backend: {}", e);
                                return;
                            }
                        };
                        // `send` returns None once the connection is gone
                        // (`.is_none()` replaces the `== None` comparison).
                        if conn.send(data).await.is_none() {
                            return;
                        }
                    },
                    // Periodic keepalive towards the client.
                    _ = sleep(Duration::from_secs(10)) => {
                        conn.send([0,0,0,0,0].to_vec()).await;
                    }
                }
            }
        });
    }
}

View file

@ -1,14 +0,0 @@
// The whole crate body is gated on the `prudpv1` feature; with it disabled
// this crate compiles to nothing.
cfg_if::cfg_if! {
    if #[cfg(feature = "prudpv1")]{
        use proxy_common::ProxyStartupParam;
        pub mod executables;
        pub mod prudp;
        /// Entry point for the secure proxy executable.
        pub async fn start_secure(param: ProxyStartupParam) {
            executables::proxy_secure::start(param).await;
        }
        /// Entry point for the insecure proxy executable.
        pub async fn start_insecure(param: ProxyStartupParam) {
            executables::proxy_insecure::start(param).await;
        }
    }
}

View file

@ -1,12 +0,0 @@
//! # PRUDPV1 Feature sanity checks
//!
//! Checks whether the enabled cargo features are actually sensible, and
//! raises a compile-time error when they conflict.
#[cfg(feature = "friends")]
compile_error!(
    "friends uses prudpv0 instead of prudpv1, please do not enable both at the same time"
);
#[cfg(feature = "prudpv0")]
compile_error!("you cannot enable two prudp versions at the same time");

View file

@ -1,74 +0,0 @@
[package]
name = "rnex-core"
version = "0.1.1"
edition = "2024"
[dependencies]
bytemuck = { version = "1.21.0", features = ["derive"] }
dotenv = "0.15.0"
once_cell = "1.20.2"
rc4 = "0.1.0"
thiserror = "2.0.11"
v-byte-helpers = { git = "https://github.com/RusticMaple/VByteMacros", version = "0.1.1" }
simplelog = "0.12.2"
chrono = "0.4.39"
log = "0.4.25"
rand = "0.8.5"
cfg-if = "1.0.4"
hmac = "0.12.1"
md-5 = "^0.10.6"
tokio = { version = "1.43.0", features = ["full"] }
hex = "0.4.3"
macros = { path = "../macros" }
paste = "1.0.15"
typenum = "1.18.0"
json = "0.12.4"
anyhow = "1.0.100"
ureq = { version = "3.1.4", features = [ "json" ] }
serde = { version = "1.0.228", features = [ "derive" ] }
serde_json = "1.0.149"
sqlx = { version = "0.8.6", optional = true, features = ["postgres", "runtime-tokio", "chrono", "time"] }
aws-sdk-s3 = { version = "1.129.0", optional = true }
aws-config = { version = "1.8.15", optional = true }
base64 = "0.22.1"
sha2 = "0.10.9"
urlencoding = "2.1.3"
futures = "0.3.32"
[dev-dependencies]
# criterion = "0.7.0"
[features]
rmc_struct_header = []
guest_login = []
friends = ["guest_login", "database-support"]
big_pid = []
v3-3-2 = []
third-notif-param = []
v3-4-0 = ["v3-3-2", "third-notif-param", "rmc_struct_header"]
v3-5-0 = ["v3-4-0"]
v3-8-15 = ["v3-5-0"]
v3-10-22 = ["v3-8-15"]
v4-3-11 = ["v3-8-15"]
nx = ["big_pid"]
splatoon = []
datastore = ["database-support", "v3-8-15", "dep:aws-sdk-s3", "dep:aws-config"]
database-support = ["dep:sqlx"]
[[bench]]
name = "rmc_serialization"
harness = false
[[bin]]
name = "backend_server_insecure"
path = "src/executables/backend_server_insecure.rs"
[[bin]]
name = "backend_server_secure"
path = "src/executables/backend_server_secure.rs"
[[bin]]
name = "edge_node_holder_server"
path = "src/executables/edge_node_holder_server.rs"

View file

@ -1,99 +0,0 @@
use std::hint::black_box;
use std::io::Cursor;
use std::ops::Deref;
use criterion::{criterion_group, criterion_main, Criterion};
use once_cell::sync::Lazy;
use rnex_core::kerberos::KerberosDateTime;
use rnex_core::rmc::structures::matchmake::{AutoMatchmakeParam, Gathering, MatchmakeParam, MatchmakeSession, MatchmakeSessionSearchCriteria};
use rnex_core::rmc::structures::RmcSerialize;
use rnex_core::rmc::structures::variant::Variant;
// Benchmark fixture: a representative, fully-populated AutoMatchmakeParam
// used as the serialization/deserialization workload.
static DUMMY: Lazy<AutoMatchmakeParam> = Lazy::new(|| AutoMatchmakeParam{
    additional_participants: vec![1,2,3,4],
    auto_matchmake_option: 10,
    gid_for_participation_check: 9,
    join_message: "hi".to_string(),
    participation_count: 32,
    target_gids: vec![45,2,51,1,1,1,1],
    search_criteria: vec![MatchmakeSessionSearchCriteria{
        attribs: vec!["hi".to_string(), "ig".to_string(), "gotta put data here".to_string()],
        exclude_locked: true,
        exclude_non_host_pid: false,
        exclude_system_password_set: true,
        exclude_user_password_set: false,
        game_mode: "some gamemode".to_string(),
        matchmake_param: MatchmakeParam{
            params: vec![
                ("SR".to_string(), Variant::Bool(true)),
                ("SR2".to_string(), Variant::Double(1.0)),
                ("SR3".to_string(), Variant::SInt64(42)),
                ("SR4".to_string(), Variant::String("test".to_string()))
            ]
        },
        matchmake_system_type: "some type".to_string(),
        maximum_participants: "???".to_string(),
        minimum_participants: "-99".to_string(),
        refer_gid: 123,
        selection_method: 9999999,
        vacant_only: true,
        vacant_participants: 1000
    }],
    matchmake_session: MatchmakeSession{
        refer_gid: 10,
        matchmake_system_type: 139,
        matchmake_param: MatchmakeParam{
            params: vec![
                ("QSR".to_string(), Variant::Bool(false)),
                ("SRQ2".to_string(), Variant::Double(1.1)),
                ("SQR3".to_string(), Variant::SInt64(422)),
                ("SDR4".to_string(), Variant::String("tetst".to_string()))
            ]
        },
        participation_count: 99,
        application_buffer: vec![1,2,3,4,5,6,7,8,9],
        attributes: vec![10,20,99,100000],
        datetime: KerberosDateTime::now(),
        gamemode: 111,
        open_participation: false,
        option0: 100,
        progress_score: 1,
        system_password_enabled: false,
        user_password: "aaa".to_string(),
        session_key: vec![91,123,5,2,1,2,4,124,4],
        user_password_enabled: false,
        gathering: Gathering{
            minimum_participants: 1,
            maximum_participants: 12,
            description: "aaargh".to_string(),
            flags: 100,
            host_pid: 999999919,
            owner_pid: 138830,
            participant_policy: 1,
            policy_argument: 99837,
            self_gid: 129,
            state: 1389488
        }
    }
});
// Pre-serialized form of DUMMY, used as input for the deserialization bench.
static DUMMY_SER: Lazy<Vec<u8>> = Lazy::new(|| serialize_to_vec(DUMMY.deref()));
/// Serializes an RMC structure to bytes, panicking on failure (bench helper).
fn serialize_to_vec(r: &impl RmcSerialize) -> Vec<u8>{
    // The intermediate `let mut vec` binding was needless; unwrap directly.
    r.to_data().unwrap()
}
/// Deserializes an RMC structure from a byte slice, panicking on failure
/// (bench helper).
fn read_struct<T: RmcSerialize>(r: &[u8]) -> T{
    let mut cursor = Cursor::new(r);
    T::deserialize(&mut cursor).unwrap()
}
/// Benchmarks serialization and deserialization of the DUMMY fixture.
fn matchmake_with_param(c: &mut Criterion) {
    let raw = DUMMY.deref();
    let ser = DUMMY_SER.deref().as_slice();
    // black_box prevents the optimizer from const-folding the workload away.
    c.bench_function("mmparam: ser", |b| b.iter(move || serialize_to_vec(black_box(raw))));
    c.bench_function("mmparam: de", |b| b.iter(move || read_struct::<AutoMatchmakeParam>(black_box(ser))));
}
criterion_group!(benches, matchmake_with_param);
criterion_main!(benches);

View file

@ -1,47 +0,0 @@
use once_cell::sync::Lazy;
use rnex_core::common::setup;
use rnex_core::executables::common::{SECURE_SERVER_ACCOUNT, new_simple_backend};
use rnex_core::nex::auth_handler::AuthHandler;
use rnex_core::reggie::EdgeNodeHolderConnectOption::DontRegister;
use rnex_core::reggie::RemoteEdgeNodeHolder;
use rnex_core::rmc::protocols::{OnlyRemote, new_rmc_gateway_connection};
use rnex_core::rmc::structures::RmcSerialize;
use rnex_core::util::SplittableBufferConnection;
use std::env;
use std::net::SocketAddrV4;
use std::sync::Arc;
use tokio::net::TcpStream;
/// Address of the upstream edge-node holder, read from the
/// `FORWARD_EDGE_NODE_HOLDER` environment variable. Panics if it is unset
/// or unparseable.
pub static FORWARD_EDGE_NODE_HOLDER: Lazy<SocketAddrV4> = Lazy::new(|| {
    env::var("FORWARD_EDGE_NODE_HOLDER")
        .ok()
        // `map` replaces the clippy-flagged `and_then(|s| Some(…))`.
        .map(|s| s.parse().unwrap())
        .expect("FORWARD_EDGE_NODE_HOLDER not set")
});
/// Auth-backend entry point: connects to the configured edge-node holder,
/// announces itself (without registering), wraps the link in an RMC gateway,
/// and serves auth requests against the secure server account.
#[tokio::main]
async fn main() {
    setup();
    let conn = TcpStream::connect(&*FORWARD_EDGE_NODE_HOLDER)
        .await
        .unwrap();
    let conn: SplittableBufferConnection = conn.into();
    // Tell the holder we only consume its API, we are not a node ourselves.
    conn.send(DontRegister.to_data().unwrap()).await;
    let conn = new_rmc_gateway_connection(conn, |r| {
        Arc::new(OnlyRemote::<RemoteEdgeNodeHolder>::new(r))
    });
    new_simple_backend(move |_, _| {
        let controller = conn.clone();
        Arc::new(AuthHandler {
            destination_server_acct: &SECURE_SERVER_ACCOUNT,
            // Version string baked in at compile time.
            build_name: env!("AUTH_REPORT_VERSION"),
            control_server: controller,
        })
    })
    .await;
}

View file

@ -1,30 +0,0 @@
use cfg_if::cfg_if;
use rnex_core::common::setup;
/// Backend entry point; the actual server started depends on the enabled
/// cargo feature: `friends`, `datastore` (which first initializes the
/// global database pool), or the plain regular backend.
#[tokio::main]
async fn main() {
    setup();
    cfg_if! {
        if #[cfg(feature = "friends")]{
            use rnex_core::executables::friends_backend::start_friends_backend;
            start_friends_backend().await;
        } else if #[cfg(feature = "datastore")] {
            use rnex_core::executables::common::DB_POOL;
            use sqlx::PgPool;
            let database_url = std::env::var("RNEX_DATASTORE_DATABASE_URL")
                .expect("RNEX_DATASTORE_DATABASE_URL must be set");
            let pool = PgPool::connect(&database_url)
                .await
                .expect("Failed to create pool");
            // The pool must be installed before the backend starts handling
            // requests that call `get_db()`.
            DB_POOL.set(pool).expect("failed to set global DB_POOL");
            use rnex_core::executables::regular_backend;
            regular_backend::start_regular_backend().await
        } else {
            use rnex_core::executables::regular_backend;
            regular_backend::start_regular_backend().await
        }
    }
}

View file

@ -1,116 +0,0 @@
use once_cell::sync::Lazy;
use rnex_core::nex::account::Account;
use rnex_core::rmc::protocols::{RmcCallable, RmcConnection, new_rmc_gateway_connection};
use rnex_core::rmc::structures::RmcSerialize;
use rnex_core::rnex_proxy_common::ConnectionInitData;
use std::env;
use std::io::Cursor;
use std::net::{Ipv4Addr, SocketAddrV4};
use std::sync::Arc;
use tokio::net::TcpListener;
cfg_if! {
if #[cfg(feature = "datastore")] {
use sqlx::postgres::PgPool;
}
}
use crate::reggie::UnitPacketRead;
use cfg_if::cfg_if;
use log::error;
use std::error::Error;
const IP_REQ_SERVICE_URL: &str = "https://ipinfo.io/ip";
cfg_if! {
    if #[cfg(feature = "datastore")] {
        use std::sync::{LazyLock, OnceLock};
        // Postgres connection string for the datastore; required at startup.
        pub static RNEX_DATASTORE_DATABASE_URL: LazyLock<String> = LazyLock::new(|| {
            std::env::var("RNEX_DATASTORE_DATABASE_URL")
                .expect("RNEX_DATASTORE_DATABASE_URL must be set")
        });
        // Global pool, populated once by the binary's main before serving.
        pub static DB_POOL: OnceLock<PgPool> = OnceLock::new();
        /// Returns the global database pool.
        /// Panics if called before the pool has been initialized.
        pub fn get_db() -> &'static PgPool {
            DB_POOL.get().expect("db_pool not initialized")
        }
        // S3 endpoint and bucket used for datastore object storage; both required.
        pub static RNEX_DATASTORE_S3_ENDPOINT: LazyLock<String> = LazyLock::new(|| {
            std::env::var("RNEX_DATASTORE_S3_ENDPOINT")
                .expect("RNEX_DATASTORE_S3_ENDPOINT must be set")
        });
        pub static RNEX_DATASTORE_S3_BUCKET: LazyLock<String> = LazyLock::new(|| {
            std::env::var("RNEX_DATASTORE_S3_BUCKET")
                .expect("RNEX_DATASTORE_S3_BUCKET must be set")
        });
    }
}
/// Asks an external web service (`IP_REQ_SERVICE_URL`) for this host's
/// public IPv4 address. Fails on network errors or an unparsable body.
pub fn try_get_ip() -> Result<Ipv4Addr, Box<dyn Error>> {
    let mut response = ureq::get(IP_REQ_SERVICE_URL).call()?;
    let body = response.body_mut().read_to_string()?;
    Ok(body.parse()?)
}
/// Address this server binds to: `SERVER_IP` env var, defaulting to 0.0.0.0.
pub static OWN_IP_PRIVATE: Lazy<Ipv4Addr> = Lazy::new(|| {
    env::var("SERVER_IP")
        .ok()
        .map(|s| s.parse().expect("invalid ip address"))
        .unwrap_or(Ipv4Addr::UNSPECIFIED)
});
/// Address advertised to clients: `SERVER_IP_PUBLIC` env var, otherwise
/// discovered through the external IP service (panics if discovery fails).
pub static OWN_IP_PUBLIC: Lazy<Ipv4Addr> = Lazy::new(|| {
    env::var("SERVER_IP_PUBLIC")
        .ok()
        .map(|s| s.parse().expect("invalid ip address"))
        .unwrap_or_else(|| try_get_ip().unwrap())
});
/// TCP port to listen on: `SERVER_PORT` env var, defaulting to 10000
/// (also used when the variable is present but not a valid u16).
pub static SERVER_PORT: Lazy<u16> = Lazy::new(|| {
    env::var("SERVER_PORT")
        .ok()
        .and_then(|s| s.parse().ok())
        .unwrap_or(10000)
});
/// Kerberos password shared by the built-in server accounts:
/// `AUTH_SERVER_PASSWORD` env var, defaulting to "password".
pub static KERBEROS_SERVER_PASSWORD: Lazy<String> = Lazy::new(|| {
    // unwrap_or_else: only allocate the default String when the env var is unset
    env::var("AUTH_SERVER_PASSWORD").unwrap_or_else(|_| "password".to_owned())
});
/// Account (PID 1) the authentication server authenticates as.
pub static AUTH_SERVER_ACCOUNT: Lazy<Account> =
    Lazy::new(|| Account::new(1, "Quazal Authentication", &KERBEROS_SERVER_PASSWORD));
/// Account (PID 2) the secure (Rendez-Vous) server authenticates as.
pub static SECURE_SERVER_ACCOUNT: Lazy<Account> =
    Lazy::new(|| Account::new(2, "Quazal Rendez-Vous", &KERBEROS_SERVER_PASSWORD));
/// Generic accept loop for a simple RMC backend.
///
/// Binds to `OWN_IP_PRIVATE:SERVER_PORT`, and for every inbound TCP
/// connection: reads one length-prefixed buffer, deserializes it into a
/// `ConnectionInitData` handshake, then hands the stream to an RMC gateway
/// whose request handler is produced by `creation_function`.
/// Malformed handshakes are logged and the connection is dropped.
pub async fn new_simple_backend<T: RmcCallable + Sync + Send + 'static, F>(mut creation_function: F)
where
    F: FnMut(ConnectionInitData, RmcConnection) -> Arc<T>,
{
    let listen = TcpListener::bind(SocketAddrV4::new(*OWN_IP_PRIVATE, *SERVER_PORT))
        .await
        .unwrap();
    while let Ok((mut stream, _addr)) = listen.accept().await {
        // First frame carries the connection-init handshake.
        let buffer = match stream.read_buffer().await {
            Ok(v) => v,
            Err(e) => {
                error!(
                    "an error ocurred whilest reading connection data buffer: {:?}",
                    e
                );
                continue;
            }
        };
        let user_connection_data = ConnectionInitData::deserialize(&mut Cursor::new(buffer));
        let user_connection_data = match user_connection_data {
            Ok(v) => v,
            Err(e) => {
                error!("an error ocurred whilest reading connection data: {:?}", e);
                continue;
            }
        };
        // Re-borrow the factory each iteration so the FnMut can be moved into
        // the per-connection closure without consuming it.
        let fun_ref = &mut creation_function;
        new_rmc_gateway_connection(stream.into(), move |r| fun_ref(user_connection_data, r));
    }
}

View file

@ -1,72 +0,0 @@
use std::{
io::Cursor,
net::SocketAddrV4,
sync::{Arc, atomic::AtomicU32},
};
use log::error;
use tokio::net::TcpListener;
use crate::{
executables::common::{OWN_IP_PRIVATE, SERVER_PORT},
nex::friends_handler::{FriendsGuest, FriendsManager, FriendsUser, RemoteFriendRemote},
reggie::UnitPacketRead,
rmc::{
protocols::{RmcPureRemoteObject, new_rmc_gateway_connection},
structures::RmcSerialize,
},
rnex_proxy_common::ConnectionInitData,
};
/// Accept loop for the friends service.
///
/// Each accepted connection performs the `ConnectionInitData` handshake;
/// PID 100 (the guest account) gets a limited `FriendsGuest` handler,
/// everyone else gets a full `FriendsUser` bound to the shared
/// `FriendsManager`. Handshake failures are logged and skipped.
pub async fn start_friends_backend() {
    let fm = Arc::new(FriendsManager {
        cid_counter: AtomicU32::new(1),
        users: Default::default(),
    });
    let listen = TcpListener::bind(SocketAddrV4::new(*OWN_IP_PRIVATE, *SERVER_PORT))
        .await
        .unwrap();
    while let Ok((mut stream, _addr)) = listen.accept().await {
        // First frame carries the connection-init handshake.
        let buffer = match stream.read_buffer().await {
            Ok(v) => v,
            Err(e) => {
                error!(
                    "an error ocurred whilest reading connection data buffer: {:?}",
                    e
                );
                continue;
            }
        };
        let user_connection_data = ConnectionInitData::deserialize(&mut Cursor::new(buffer));
        let c = match user_connection_data {
            Ok(v) => v,
            Err(e) => {
                error!("an error ocurred whilest reading connection data: {:?}", e);
                continue;
            }
        };
        let fm = fm.clone();
        // PID 100 is the guest account (see GUEST_ACCOUNT elsewhere in the project).
        if c.pid != 100 {
            new_rmc_gateway_connection(stream.into(), move |r| {
                // new_cyclic lets the user hold a Weak to itself for registration.
                Arc::new_cyclic(move |this| FriendsUser {
                    fm,
                    addr: c.prudpsock_addr,
                    pid: c.pid,
                    data: Default::default(),
                    current_friends: Default::default(),
                    this: this.clone(),
                    remote: RemoteFriendRemote::new(r),
                })
            });
        } else {
            new_rmc_gateway_connection(stream.into(), move |_| {
                Arc::new_cyclic(move |_| FriendsGuest {
                    fm,
                    addr: c.prudpsock_addr,
                })
            });
        }
    }
}

View file

@ -1,10 +0,0 @@
use cfg_if::cfg_if;
// Helpers shared by every backend executable.
pub mod common;
cfg_if! {
    if #[cfg(feature = "friends")]{
        // The friends feature replaces the regular backend entirely.
        pub mod friends_backend;
    } else {
        pub mod regular_backend;
    }
}

View file

@ -1,34 +0,0 @@
use std::sync::{Arc, atomic::AtomicU32};
use crate::{
executables::common::new_simple_backend,
nex::{matchmake::MatchmakeManager, remote_console::RemoteConsole, user::User},
rmc::protocols::RmcPureRemoteObject,
};
/// Starts the regular (matchmaking) backend.
///
/// Creates the shared `MatchmakeManager`, kicks off its garbage-collection
/// task (holding only a Weak so the GC does not keep the manager alive),
/// then serves clients: each connection becomes a `User` wired to the
/// manager and its remote console.
pub async fn start_regular_backend() {
    let mmm = Arc::new(MatchmakeManager {
        //gid_counter: AtomicU32::new(1),
        sessions: Default::default(),
        users: Default::default(),
        users_by_pid: Default::default(),
        rv_cid_counter: AtomicU32::new(1),
    });
    // Weak handle so the GC thread cannot extend the manager's lifetime.
    let weak_mmm = Arc::downgrade(&mmm);
    MatchmakeManager::initialize_garbage_collect_thread(weak_mmm).await;
    new_simple_backend(move |c, r| {
        let mmm = mmm.clone();
        // new_cyclic lets each User keep a Weak reference to itself.
        Arc::new_cyclic(move |this| User {
            this: this.clone(),
            ip: c.prudpsock_addr,
            pid: c.pid,
            remote: RemoteConsole::new(r),
            matchmake_manager: mmm,
            station_url: Default::default(),
        })
    })
    .await;
}

View file

@ -1,225 +0,0 @@
use crate::grpc::account::Error::SomethingHappened;
use json::{JsonValue, object};
use once_cell::sync::Lazy;
use rnex_core::PID;
use std::array::TryFromSliceError;
use std::ops::Deref;
use std::{env, result};
use thiserror::Error;
use tokio::task::{JoinError, spawn_blocking};
/// API key sent as the `X-API-Key` header on every GraphQL request.
/// Panics at first use if `ACCOUNT_GQL_API_KEY` is unset.
static API_KEY: Lazy<String> =
    Lazy::new(|| env::var("ACCOUNT_GQL_API_KEY").expect("ACCOUNT_GQL_API_KEY not set"));
/// GraphQL endpoint URL of the account service.
/// Panics at first use if `ACCOUNT_GQL_URL` is unset.
static CLIENT_URI: Lazy<String> = Lazy::new(|| {
    // The previous String -> String `parse()` round-trip was infallible and
    // served no purpose; the raw env value is the URI.
    env::var("ACCOUNT_GQL_URL").expect("ACCOUNT_GQL_URL not set")
});
/// Errors produced by the account GraphQL client.
#[derive(Error, Debug)]
pub enum Error {
    /// HTTP transport failure from ureq.
    #[error(transparent)]
    RequestError(#[from] ureq::Error),
    /// Response body was not valid JSON.
    #[error(transparent)]
    Json(#[from] json::Error),
    //#[error(transparent)]
    //Status(#[from] tonic::Status),
    /// Password bytes were not exactly the expected length.
    #[error("invalid password size: {0}")]
    PasswordConversion(#[from] TryFromSliceError),
    /// Catch-all for a response missing an expected field.
    #[error("something happened")]
    SomethingHappened,
    /// The blocking request task panicked or was cancelled.
    #[error("error joining blocking task: {0}")]
    Join(#[from] JoinError),
}
/// Convenience alias for results of this module.
pub type Result<T> = result::Result<T, Error>;
/// Stateless handle to the account GraphQL service.
pub struct Client; //(reqwest::Client);
impl Client {
    /// Creates a client. Currently stateless; kept async and fallible so the
    /// signature survives a future switch back to a pooled HTTP client.
    pub async fn new() -> Result<Self> {
        //Ok(Self(reqwest::ClientBuilder::new().build()?))
        Ok(Self)
    }
    /// POSTs `request_data` as JSON to the GraphQL endpoint (with the API key
    /// header) and parses the response body as JSON.
    async fn do_request(&self, request_data: JsonValue) -> Result<JsonValue> {
        let request = ureq::post(CLIENT_URI.as_str())
            .header("X-API-Key", API_KEY.deref())
            .content_type("application/json");
        // ureq is a blocking client, so run the request on tokio's blocking pool.
        let mut response = spawn_blocking(move || request.send(request_data.to_string())).await??;
        let str_body = response.body_mut().read_to_string()?;
        Ok(json::parse(&str_body)?)
    }
    /// Descends through nested JSON objects following `path`, returning
    /// `SomethingHappened` as soon as a key is missing.
    fn extract<'a>(mut value: &'a JsonValue, path: &[&str]) -> Result<&'a JsonValue> {
        for key in path {
            value = value
                .entries()
                .find(|v| v.0 == *key)
                .ok_or(SomethingHappened)?
                .1;
        }
        Ok(value)
    }
    /// Fetches the 16-byte NEX password for `pid`.
    pub async fn get_nex_password(&mut self, pid: PID) -> Result<[u8; 16]> {
        let req = self
            .do_request(object! {
                "query": r"query($pid: Int!){
                    userByPid(pid: $pid){
                        nexPassword
                    }
                }",
                "variables": {
                    "pid": pid
                }
            })
            .await?;
        let val = Self::extract(&req, &["data", "userByPid", "nexPassword"])?
            .as_str()
            .ok_or(SomethingHappened)?;
        val.as_bytes().try_into().map_err(|_| SomethingHappened)
    }
    /// Fetches the account level (permissions tier) for `pid`.
    pub async fn get_user_level(&mut self, pid: PID) -> Result<i32> {
        let req = self
            .do_request(object! {
                "query": r"query($pid: Int!){
                    userByPid(pid: $pid){
                        accountLevel
                    }
                }",
                "variables": {
                    "pid": pid
                }
            })
            .await?;
        Self::extract(&req, &["data", "userByPid", "accountLevel"])?
            .as_i32()
            .ok_or(SomethingHappened)
    }
    /// Resolves a login token to the PID it was issued for.
    pub async fn get_pid_from_token(&mut self, token: String) -> Result<PID> {
        let req = self
            .do_request(object! {
                "query":
                r"query($token: String!){
                    token(tokenData: $token){
                        pid
                    }
                }",
                "variables": {
                    "token": token
                }
            })
            .await?;
        // this breaks switch nex servers and should be fixed eventually
        Self::extract(&req, &["data", "token", "pid"])?
            .as_u32()
            .ok_or(SomethingHappened)
    }
    /*pub async fn get_user_data(&mut self , pid: u32) -> Result<GetUserDataResponse>{
        let req = Request::new(GetUserDataRequest{
            pid
        });
        let response = self.0.get_user_data(req).await?.into_inner();
        Ok(response)
    }*/
}
/*
pub struct Client(AccountClient<InterceptedService<Channel, InterceptorFunc>>);
impl Client{
pub async fn new() -> Result<Self>{
let channel = Channel::from_static(&*CLIENT_URI).connect().await?;
let func = Box::new(&|mut req: Request<()>|{
req.metadata_mut().insert("x-api-key", API_KEY.clone());
Ok(req)
}) as InterceptorFunc;
let client = AccountClient::with_interceptor(channel, func);
Ok(Self(client))
}
pub async fn get_nex_password(&mut self , pid: u32) -> Result<[u8; 16]>{
let req = Request::new(GetNexPasswordRequest{
pid
});
let response = self.0.get_nex_password(req).await?.into_inner();
Ok(response.password.as_bytes().try_into()?)
}
pub async fn get_user_data(&mut self , pid: u32) -> Result<GetUserDataResponse>{
let req = Request::new(GetUserDataRequest{
pid
});
let response = self.0.get_user_data(req).await?.into_inner();
Ok(response)
}
}
*/

View file

@ -1,223 +0,0 @@
use bytemuck::{Pod, Zeroable, bytes_of};
use cfg_if::cfg_if;
use chrono::{Datelike, NaiveDate, NaiveDateTime, NaiveTime, Timelike, Utc};
use hmac::Hmac;
use hmac::Mac;
use md5::{Digest, Md5};
use rc4::KeyInit;
use rc4::cipher::StreamCipherCoreWrapper;
use rc4::{Rc4, Rc4Core, StreamCipher};
use rnex_core::rmc::structures::RmcSerialize;
use std::io::{Read, Write};
use typenum::U16;
use typenum::Unsigned;
use rnex_core::rmc::structures::Result;
use rnex_core::PID;
// Session-key length differs by protocol flavor: 16 bytes for the friends
// service, 32 bytes otherwise.
cfg_if! {
    if #[cfg(feature = "friends")]{
        pub type SessionLengthTy = U16;
    } else {
        use rc4::consts::U32;
        pub type SessionLengthTy = U32;
    }
}
// Session-key length in bytes, derived from the type-level constant above.
pub const SESSION_KEY_LENGTH: usize = SessionLengthTy::USIZE;
// HMAC-MD5 is the (legacy) integrity primitive mandated by the protocol.
type Md5Hmac = Hmac<md5::Md5>;
/// Derives a 16-byte kerberos key by iterated MD5 over `password`.
///
/// The round count is 65000 + (pid % 1024), tying the key to the principal.
/// The first round hashes the raw password (guaranteeing a 16-byte state);
/// every later round re-hashes the previous digest.
pub fn derive_key(pid: PID, password: &[u8]) -> [u8; 16] {
    let rounds = 65000 + pid % 1024;
    let first: [u8; 16] = {
        let mut hasher = Md5::new();
        hasher.update(password);
        hasher.finalize().try_into().unwrap()
    };
    (1..rounds).fold(first, |prev, _| {
        let mut hasher = Md5::new();
        hasher.update(prev);
        hasher.finalize().try_into().unwrap()
    })
}
/// Timestamp packed into a u64 with the Quazal/Kerberos bit layout:
/// seconds [0..6), minutes [6..12), hours [12..17), day [17..22),
/// month [22..26), year [26..) — see `KerberosDateTime::new`.
#[derive(Pod, Zeroable, Copy, Clone, Debug, Eq, PartialEq, Default)]
#[repr(transparent)]
pub struct KerberosDateTime(pub u64);
impl KerberosDateTime {
    /// Wraps an already-packed timestamp value.
    pub fn from_u64(val: u64) -> Self {
        Self(val)
    }
    /// Packs a chrono `NaiveDateTime` into the kerberos bit layout.
    /// (The previously duplicated function-local `use` statements were
    /// redundant: `Datelike`/`Timelike` are imported at module level.)
    pub fn from_naive(dt: chrono::NaiveDateTime) -> Self {
        Self::new(
            dt.second() as u64,
            dt.minute() as u64,
            dt.hour() as u64,
            dt.day() as u64,
            dt.month() as u64,
            dt.year() as u64,
        )
    }
    /// Packs the individual calendar fields:
    /// sec(6 bits) | min(6) | hour(5) | day(5) | month(4) | year(rest).
    /// Inputs are not masked, so out-of-range values bleed into higher fields.
    pub fn new(second: u64, minute: u64, hour: u64, day: u64, month: u64, year: u64) -> Self {
        Self(second | (minute << 6) | (hour << 12) | (day << 17) | (month << 22) | (year << 26))
    }
    /// Current UTC time in packed form.
    pub fn now() -> Self {
        // Delegate to from_naive instead of duplicating the packing logic.
        Self::from_naive(chrono::Utc::now().naive_utc())
    }
    /// Seconds field (0-59 when well-formed).
    #[inline]
    pub fn get_seconds(&self) -> u8 {
        (self.0 & 0b111111) as u8
    }
    /// Minutes field (0-59 when well-formed).
    #[inline]
    pub fn get_minutes(&self) -> u8 {
        ((self.0 >> 6) & 0b111111) as u8
    }
    /// Hours field (0-23 when well-formed).
    #[inline]
    pub fn get_hours(&self) -> u8 {
        ((self.0 >> 12) & 0b111111) as u8
    }
    /// Day-of-month field (1-31 when well-formed).
    #[inline]
    pub fn get_days(&self) -> u8 {
        ((self.0 >> 17) & 0b111111) as u8
    }
    /// Month field (1-12 when well-formed).
    #[inline]
    pub fn get_month(&self) -> u8 {
        ((self.0 >> 22) & 0b1111) as u8
    }
    /// Year field (top bits, masked to 32 bits).
    #[inline]
    pub fn get_year(&self) -> u64 {
        (self.0 >> 26) & 0xFFFFFFFF
    }
    /// Expands back into a chrono UTC datetime.
    /// Panics if the packed fields do not form a valid calendar date/time.
    pub fn to_regular_time(&self) -> chrono::DateTime<Utc> {
        NaiveDateTime::new(
            NaiveDate::from_ymd_opt(
                self.get_year() as i32,
                self.get_month() as u32,
                self.get_days() as u32,
            )
            .unwrap(),
            NaiveTime::from_hms_opt(
                self.get_hours() as u32,
                self.get_minutes() as u32,
                self.get_seconds() as u32,
            )
            .unwrap(),
        )
        .and_utc()
    }
}
/// Wire format: the packed u64, serialized exactly like a plain u64.
impl RmcSerialize for KerberosDateTime {
    fn serialize(&self, writer: &mut impl Write) -> Result<()> {
        self.0.serialize(writer)
    }
    fn deserialize(reader: &mut impl Read) -> Result<Self> {
        u64::deserialize(reader).map(Self)
    }
}
/// Inner (server-only) portion of a kerberos ticket: issue time, the
/// principal it was issued for, and the freshly generated session key.
/// `repr(C, packed)` because it is serialized as raw bytes via bytemuck.
#[derive(Pod, Zeroable, Copy, Clone)]
#[repr(C, packed)]
pub struct TicketInternalData {
    pub issued_time: KerberosDateTime,
    pub pid: PID,
    pub session_key: [u8; SESSION_KEY_LENGTH],
}
impl TicketInternalData {
    /// Fresh ticket internals: current timestamp, the given principal,
    /// and a random session key.
    pub(crate) fn new(pid: PID) -> Self {
        Self {
            issued_time: KerberosDateTime::now(),
            pid,
            session_key: rand::random(),
        }
    }
    /// RC4-encrypts the raw struct bytes with `key`, then appends an
    /// HMAC-MD5 (keyed with the same key) computed over the ciphertext.
    /// Output layout: ciphertext || 16-byte hmac.
    pub(crate) fn encrypt(&self, key: [u8; 16]) -> Box<[u8]> {
        let mut data = bytes_of(self).to_vec();
        let mut rc4: StreamCipherCoreWrapper<Rc4Core<U16>> = Rc4::new_from_slice(&key).unwrap();
        rc4.apply_keystream(&mut data);
        let mut hmac = <Md5Hmac as KeyInit>::new_from_slice(&key).unwrap();
        hmac.write_all(&data[..])
            .expect("failed to write data to hmac");
        let hmac_result = &hmac.finalize().into_bytes()[..];
        data.write_all(&hmac_result)
            .expect("failed to write data to vec");
        data.into_boxed_slice()
    }
}
/// Outer (client-visible) portion of a kerberos ticket: the session key and
/// the destination principal. `repr(C, packed)` for raw-byte serialization.
#[derive(Pod, Zeroable, Debug, Copy, Clone)]
#[repr(C, packed)]
pub struct Ticket {
    pub session_key: [u8; SESSION_KEY_LENGTH],
    pub pid: PID,
}
impl Ticket {
    /// RC4-encrypts the raw ticket bytes plus the (already encrypted)
    /// `internal_data` blob with `key`, then appends an HMAC-MD5 over the
    /// ciphertext. Output layout: ciphertext || 16-byte hmac.
    pub fn encrypt(&self, key: [u8; 16], internal_data: &[u8]) -> Box<[u8]> {
        let mut data = bytes_of(self).to_vec();
        // internal_data is appended via RmcSerialize (length-prefixed buffer).
        internal_data
            .serialize(&mut data)
            .expect("unable to write to vec");
        let mut rc4: StreamCipherCoreWrapper<Rc4Core<U16>> = Rc4::new_from_slice(&key).unwrap();
        rc4.apply_keystream(&mut data);
        let mut hmac = <Md5Hmac as KeyInit>::new_from_slice(&key).unwrap();
        hmac.write_all(&data[..])
            .expect("failed to write data to hmac");
        let hmac_result = &hmac.finalize().into_bytes()[..];
        data.write_all(&hmac_result)
            .expect("failed to write data to vec");
        data.into_boxed_slice()
    }
}
#[cfg(test)]
mod test {
    use crate::kerberos::KerberosDateTime;
    /// The original test only printed the converted value; it now asserts the
    /// unpacked fields so a packing regression actually fails the test.
    #[test]
    fn kerberos_time_convert_test() {
        // 135904948834 unpacks to 2025-02-08 15:09:34 UTC.
        let time = KerberosDateTime(135904948834);
        assert_eq!(time.get_year(), 2025);
        assert_eq!(time.get_month(), 2);
        assert_eq!(time.get_days(), 8);
        assert_eq!(time.get_hours(), 15);
        assert_eq!(time.get_minutes(), 9);
        assert_eq!(time.get_seconds(), 34);
        println!("{}", time.to_regular_time().to_rfc2822());
    }
}

View file

@ -1,32 +0,0 @@
use macros::RmcSerialize;
use rnex_core::PID;
/// A NEX/Quazal account: principal id, username, and the raw kerberos
/// password bytes used for key derivation.
#[derive(RmcSerialize, Clone)]
pub struct Account {
    pub pid: PID,
    pub username: String,
    // NOTE(review): "kerbros" is a typo, but the field is public API — kept as-is.
    pub kerbros_password: Box<[u8]>,
}
impl Account {
    /// Builds an account from a textual password (stored as its UTF-8 bytes).
    pub fn new(pid: PID, username: &str, passwd: &str) -> Self {
        Self::new_raw_password(pid, username, passwd.as_bytes())
    }
    /// Builds an account from already-raw password bytes.
    pub fn new_raw_password(pid: PID, username: &str, passwd: &[u8]) -> Self {
        Self {
            pid,
            username: username.to_owned(),
            kerbros_password: Box::from(passwd),
        }
    }
    /// Returns the (pid, raw password) pair used for kerberos key derivation.
    pub fn get_login_data(&self) -> (PID, &[u8]) {
        (self.pid, &self.kerbros_password[..])
    }
}

View file

@ -1,339 +0,0 @@
use crate::grpc::account;
use crate::reggie::{RemoteEdgeNodeHolder, RemoteEdgeNodeManagement};
use crate::{define_rmc_proto, kerberos};
use cfg_if::cfg_if;
use log::{info, warn};
use macros::rmc_struct;
use rnex_core::PID;
use rnex_core::kerberos::{KerberosDateTime, Ticket, derive_key};
use rnex_core::nex::account::Account;
use rnex_core::rmc::protocols::OnlyRemote;
use rnex_core::rmc::protocols::auth::{Auth, RawAuth, RawAuthInfo, RemoteAuth};
use rnex_core::rmc::response::ErrorCode;
use rnex_core::rmc::response::ErrorCode::Core_Unknown;
use rnex_core::rmc::structures::any::Any;
use rnex_core::rmc::structures::connection_data::ConnectionData;
use rnex_core::rmc::structures::connection_data::ConnectionDataOld;
use rnex_core::rmc::structures::qresult::QResult;
use std::hash::{DefaultHasher, Hasher};
use std::net::SocketAddrV4;
use std::sync::{Arc, LazyLock};
// RMC protocol bundle served to auth clients: only the Auth protocol.
define_rmc_proto!(
    proto AuthClientProtocol{
        Auth
    }
);
/// Per-connection handler for the authentication server.
#[rmc_struct(AuthClientProtocol)]
pub struct AuthHandler {
    // Account tickets are issued *for* (the secure server).
    pub destination_server_acct: &'static Account,
    // Build string reported to clients in login responses.
    pub build_name: &'static str,
    //pub station_url: &'static str,
    // Link to the edge-node holder, used to pick a secure proxy per user.
    pub control_server: Arc<OnlyRemote<RemoteEdgeNodeHolder>>,
}
/// Builds an encrypted kerberos ticket for `source` addressed to `dest`.
///
/// Both arguments are (pid, raw password) pairs. The inner blob (timestamp,
/// source pid, random session key) is sealed with the destination's derived
/// key; the outer ticket is sealed with the source's derived key.
pub fn generate_ticket(
    source_act_login_data: (PID, &[u8]),
    dest_act_login_data: (PID, &[u8]),
) -> Box<[u8]> {
    let (source_pid, source_password) = source_act_login_data;
    let (dest_pid, dest_password) = dest_act_login_data;
    let source_key = derive_key(source_pid, source_password);
    let dest_key = derive_key(dest_pid, dest_password);
    let internal = kerberos::TicketInternalData::new(source_pid);
    let sealed_internal = internal.encrypt(dest_key);
    Ticket {
        pid: dest_pid,
        session_key: internal.session_key,
    }
    .encrypt(source_key, &sealed_internal)
}
/// Like `generate_ticket`, but seals the outer ticket with a freshly
/// generated random key instead of the source account's password-derived
/// key. Returns (key string, encrypted ticket).
pub fn generate_ticket_with_string_user_key(
    source_act: PID,
    dest_act_login_data: (PID, &[u8]),
) -> (String, Box<[u8]>) {
    // 8 random bytes hex-encoded give 16 ASCII chars, used directly as the
    // 16-byte RC4 key.
    let key_string = hex::encode(rand::random::<[u8; 8]>());
    let key_data: [u8; 16] = key_string.as_bytes().try_into().unwrap();
    let (dest_pid, dest_password) = dest_act_login_data;
    let dest_key = derive_key(dest_pid, dest_password);
    let internal = kerberos::TicketInternalData::new(source_act);
    let sealed_internal = internal.encrypt(dest_key);
    let ticket = Ticket {
        pid: dest_pid,
        session_key: internal.session_key,
    }
    .encrypt(key_data, &sealed_internal);
    (key_string, ticket)
}
/// Looks up the (pid, raw password) pair for `pid`: the built-in guest
/// credentials for the guest pid, otherwise the NEX password fetched from
/// the account service. Returns None on any lookup failure.
async fn get_login_data_by_pid(pid: PID) -> Option<(PID, Box<[u8]>)> {
    if pid == GUEST_ACCOUNT.pid {
        let (guest_pid, guest_password) = GUEST_ACCOUNT.get_login_data();
        return Some((guest_pid, guest_password.into()));
    }
    let mut client = account::Client::new().await.ok()?;
    let passwd = client.get_nex_password(pid).await.ok()?;
    Some((pid, passwd.into()))
}
/// Formats a secure-server station URL (prudps scheme, fixed PID/sid/stream/
/// type/CID fields) pointing at the given socket address.
fn station_url_from_sock_addr(sock_addr: SocketAddrV4) -> String {
    let (ip, port) = (sock_addr.ip(), sock_addr.port());
    format!("prudps:/PID=2;sid=1;stream=10;type=2;address={ip};port={port};CID=1")
}
// Built-in guest account (PID 100) with a fixed well-known password.
static GUEST_ACCOUNT: LazyLock<Account> =
    LazyLock::new(|| Account::new(100, "guest", "MMQea3n!fsik"));
impl AuthHandler {
    /// Issues a kerberos ticket for the user named `name` (a stringified PID,
    /// or the guest username when the `guest_login` feature is enabled).
    /// Returns the user's pid and the encrypted ticket.
    pub async fn generate_ticket_from_name(
        &self,
        name: &str,
    ) -> Result<(PID, Box<[u8]>), ErrorCode> {
        #[cfg(feature = "guest_login")]
        {
            if name == GUEST_ACCOUNT.username {
                let source_login_data = GUEST_ACCOUNT.get_login_data();
                let destination_login_data = self.destination_server_acct.get_login_data();
                return Ok((
                    source_login_data.0,
                    generate_ticket(source_login_data, destination_login_data),
                ));
            }
        }
        let Ok(pid) = name.parse() else {
            warn!("unable to parse pid: {}", name);
            return Err(ErrorCode::Core_InvalidArgument);
        };
        let Ok(mut client) = account::Client::new().await else {
            warn!("unable to connect to grpc");
            return Err(ErrorCode::Core_Exception);
        };
        let Ok(passwd) = client.get_nex_password(pid).await else {
            warn!("unable to get nex password for pid: {}:", pid);
            return Err(ErrorCode::Core_Exception);
        };
        // Do NOT log the password here: the previous debug println! leaked raw
        // NEX password material to stdout.
        let source_login_data = (pid, &passwd[..]);
        let destination_login_data = self.destination_server_acct.get_login_data();
        Ok((
            pid,
            generate_ticket(source_login_data, destination_login_data),
        ))
    }
    /// Like `generate_ticket_from_name`, but seals the ticket with a random
    /// string key which is returned alongside: (pid, key, ticket).
    /// Guest login is always honored here (no feature gate).
    pub async fn generate_ticket_from_name_string_user_key(
        &self,
        name: &str,
    ) -> Result<(PID, String, Box<[u8]>), ErrorCode> {
        {
            if name == GUEST_ACCOUNT.username {
                let source_login_data = GUEST_ACCOUNT.get_login_data();
                let destination_login_data = self.destination_server_acct.get_login_data();
                let ticket = generate_ticket_with_string_user_key(
                    source_login_data.0,
                    destination_login_data,
                );
                return Ok((source_login_data.0, ticket.0, ticket.1));
            }
        }
        let Ok(pid) = name.parse() else {
            warn!("unable to parse pid: {}", name);
            return Err(ErrorCode::Core_InvalidArgument);
        };
        let destination_login_data = self.destination_server_acct.get_login_data();
        let data = generate_ticket_with_string_user_key(pid, destination_login_data);
        Ok((pid, data.0, data.1))
    }
}
impl Auth for AuthHandler {
    /// Legacy login by username (a stringified PID).
    ///
    /// Issues a ticket, routes the user to a secure proxy (chosen
    /// consistently by hashing the name), and returns old-style connection
    /// data pointing at that proxy.
    async fn login(
        &self,
        name: String,
    ) -> Result<(QResult, PID, Vec<u8>, ConnectionDataOld, String), ErrorCode> {
        let (pid, ticket) = self.generate_ticket_from_name(&name).await?;
        let result = QResult::success(Core_Unknown);
        // Hash the name so the same user always lands on the same proxy.
        let mut hasher = DefaultHasher::new();
        hasher.write(name.as_bytes());
        let Ok(addr) = self.control_server.get_url(hasher.finish()).await else {
            warn!("no secure proxies");
            return Err(ErrorCode::Core_Exception);
        };
        let connection_data = ConnectionDataOld {
            station_url: station_url_from_sock_addr(addr),
            special_station_url: "".to_string(),
            special_protocols: Vec::new(),
        };
        let ret = (
            result,
            pid,
            ticket.into(),
            connection_data,
            self.build_name.to_string(),
        );
        info!("data: {:?}", ret);
        Ok(ret)
    }
    // login_ex / request_ticket differ between the Switch ("nx") and Wii U
    // protocol flavors: the nx variants additionally return the string user
    // key the ticket was sealed with.
    cfg_if! {
        if #[cfg(feature = "nx")]{
            /// Switch-style extended login: new-style connection data plus
            /// the string user key.
            async fn login_ex(
                &self,
                name: String,
                _extra_data: Any,
            ) -> Result<(QResult, PID, Vec<u8>, ConnectionData, String, String), ErrorCode> {
                let (pid, key, ticket) = self.generate_ticket_from_name_string_user_key(&name).await?;
                let result = QResult::success(Core_Unknown);
                // Same consistent-routing hash as in login().
                let mut hasher = DefaultHasher::new();
                hasher.write(name.as_bytes());
                let Ok(addr) = self.control_server.get_url(hasher.finish()).await else {
                    warn!("no secure proxies");
                    return Err(ErrorCode::Core_Exception);
                };
                let connection_data = ConnectionData {
                    station_url: station_url_from_sock_addr(addr),
                    special_station_url: "".to_string(),
                    //date_time: KerberosDateTime::new(1,1,1,1,1,1),
                    date_time: KerberosDateTime::now(),
                    special_protocols: Vec::new(),
                };
                let ret = (
                    result,
                    pid,
                    ticket.into(),
                    connection_data,
                    self.build_name.to_string(),
                    key
                );
                info!("data: {:?}", ret);
                Ok(ret)
            }
            /// Issues a ticket for `source_pid` addressed to the secure
            /// server; only the configured destination pid is allowed.
            async fn request_ticket(
                &self,
                source_pid: PID,
                destination_pid: PID,
            ) -> Result<(QResult, Vec<u8>, String), ErrorCode> {
                let Some((pid, _)) = get_login_data_by_pid(source_pid).await else {
                    return Err(ErrorCode::Core_Exception);
                };
                let desgination_login_data = if destination_pid == self.destination_server_acct.pid {
                    self.destination_server_acct.get_login_data()
                } else {
                    return Err(ErrorCode::RendezVous_InvalidOperation);
                };
                let result = QResult::success(Core_Unknown);
                let ticket = generate_ticket_with_string_user_key(pid, desgination_login_data);
                Ok((result, ticket.1.into(), ticket.0))
            }
        } else {
            /// Wii U-style extended login: new-style connection data, no
            /// string user key in the response.
            async fn login_ex(
                &self,
                name: String,
                _extra_data: Any,
            ) -> Result<(QResult, PID, Vec<u8>, ConnectionData, String), ErrorCode> {
                let (pid, ticket) = self.generate_ticket_from_name(&name).await?;
                let result = QResult::success(Core_Unknown);
                // Same consistent-routing hash as in login().
                let mut hasher = DefaultHasher::new();
                hasher.write(name.as_bytes());
                let Ok(addr) = self.control_server.get_url(hasher.finish()).await else {
                    warn!("no secure proxies");
                    return Err(ErrorCode::Core_Exception);
                };
                let connection_data = ConnectionData {
                    station_url: station_url_from_sock_addr(addr),
                    special_station_url: "".to_string(),
                    //date_time: KerberosDateTime::new(1,1,1,1,1,1),
                    date_time: KerberosDateTime::now(),
                    special_protocols: Vec::new(),
                };
                let ret = (
                    result,
                    pid,
                    ticket.into(),
                    connection_data,
                    self.build_name.to_string(),
                );
                info!("data: {:?}", ret);
                Ok(ret)
            }
            /// Issues a password-key ticket for `source_pid` addressed to the
            /// secure server; only the configured destination pid is allowed.
            async fn request_ticket(
                &self,
                source_pid: PID,
                destination_pid: PID,
            ) -> Result<(QResult, Vec<u8>), ErrorCode> {
                let Some((pid, passwd)) = get_login_data_by_pid(source_pid).await else {
                    return Err(ErrorCode::Core_Exception);
                };
                let desgination_login_data = if destination_pid == self.destination_server_acct.pid {
                    self.destination_server_acct.get_login_data()
                } else {
                    return Err(ErrorCode::RendezVous_InvalidOperation);
                };
                let result = QResult::success(Core_Unknown);
                let ticket = generate_ticket((pid, &passwd[..]), desgination_login_data);
                Ok((result, ticket.into()))
            }
        }
    }
    /// Username lookup is not supported by this server.
    async fn get_pid(&self, _username: String) -> Result<u32, ErrorCode> {
        Err(ErrorCode::Core_Exception)
    }
    /// PID-to-name lookup is not supported by this server.
    async fn get_name(&self, _pid: PID) -> Result<String, ErrorCode> {
        Err(ErrorCode::Core_Exception)
    }
}

View file

@ -1,95 +0,0 @@
use rnex_core::prudp::station_url::StationUrl;
use rnex_core::prudp::station_url::UrlOptions::{
Address, NatFiltering, NatMapping, NatType, Port, PrincipalID, RVConnectionID,
};
use rnex_core::prudp::station_url::nat_types::PUBLIC;
use rnex_core::rmc::response::ErrorCode::Core_Exception;
use rnex_core::prudp::socket_addr::PRUDPSockAddr;
use rnex_core::rmc::response::ErrorCode;
use rnex_core::PID;
/// Normalizes a client's advertised station URLs for matchmaking.
///
/// Classifies the supplied URLs into a public and a private candidate, and
/// if no public one exists, synthesizes it from the private URL plus the
/// observed external address `addr` (open NAT assumed: filtering/mapping 0,
/// NatType 3). Both candidates are stamped with `pid` and `cid`, but only
/// the public URL is returned.
/// NOTE(review): the private station is mutated and then discarded —
/// presumably intentional, but worth confirming.
pub async fn get_station_urls(
    station_urls: &[StationUrl],
    addr: PRUDPSockAddr,
    pid: PID,
    cid: u32,
) -> Result<Vec<StationUrl>, ErrorCode> {
    let mut public_station: Option<StationUrl> = None;
    let mut private_station: Option<StationUrl> = None;
    for station in station_urls {
        // A URL is "public" if its NatType bitfield has the PUBLIC bit set.
        let is_public = station.options.iter().any(|v| {
            if let NatType(v) = v {
                if *v & PUBLIC != 0 {
                    return true;
                }
            }
            false
        });
        // Both NAT properties must be present on every URL.
        let Some(nat_filtering) = station.options.iter().find_map(|v| match v {
            NatFiltering(v) => Some(v),
            _ => None,
        }) else {
            return Err(Core_Exception);
        };
        let Some(nat_mapping) = station.options.iter().find_map(|v| match v {
            NatMapping(v) => Some(v),
            _ => None,
        }) else {
            return Err(Core_Exception);
        };
        // Fully open NAT (0/0) also counts as a usable private candidate.
        if !is_public || (*nat_filtering == 0 && *nat_mapping == 0) {
            private_station = Some(station.clone());
        }
        if is_public {
            public_station = Some(station.clone());
        }
    }
    // Without at least a private URL we cannot build anything.
    let Some(mut private_station) = private_station else {
        return Err(Core_Exception);
    };
    let mut public_station = if let Some(public_station) = public_station {
        public_station
    } else {
        // Synthesize the public URL: copy the private one, strip its
        // addressing/NAT options, and substitute the externally observed
        // address with open-NAT markers.
        let mut public_station = private_station.clone();
        public_station.options.retain(|v| match v {
            Address(_) | Port(_) | NatFiltering(_) | NatMapping(_) | NatType(_) => false,
            _ => true,
        });
        public_station
            .options
            .push(Address(addr.regular_socket_addr.ip().clone()));
        public_station
            .options
            .push(Port(addr.regular_socket_addr.port()));
        public_station.options.push(NatFiltering(0));
        public_station.options.push(NatMapping(0));
        public_station.options.push(NatType(3));
        public_station
    };
    // Replace any client-supplied principal/connection ids with server truth.
    let both = [&mut public_station, &mut private_station];
    for station in both {
        station.options.retain(|v| match v {
            PrincipalID(_) | RVConnectionID(_) => false,
            _ => true,
        });
        station.options.push(PrincipalID(pid));
        station.options.push(RVConnectionID(cid));
    }
    Ok(vec![public_station])
}

File diff suppressed because it is too large Load diff

View file

@ -1,435 +0,0 @@
use std::io::{Cursor, Write};
use std::ops::Deref;
use std::sync::Weak;
use std::sync::{Arc, atomic::AtomicU32};
use bytemuck::bytes_of;
use hmac::Mac;
use log::info;
use macros::rmc_struct;
use rnex_core::rmc::protocols::account_management::{
AccountManagement, RawAccountManagement, RawAccountManagementInfo, RemoteAccountManagement,
};
use rnex_core::rmc::protocols::friends::{Friends, RawFriends, RawFriendsInfo, RemoteFriends};
use rnex_core::rmc::protocols::nintendo_notification::{
NintendoNotification, RawNintendoNotification, RawNintendoNotificationInfo,
RemoteNintendoNotification,
};
use rnex_core::rmc::protocols::secure::{RawSecure, RawSecureInfo, RemoteSecure, Secure};
use rnex_core::{
define_rmc_proto,
kerberos::KerberosDateTime,
nex::common::get_station_urls,
prudp::{socket_addr::PRUDPSockAddr, station_url::StationUrl},
rmc::{
protocols::friends::{
BlacklistedPrincipal, Comment, FriendInfo, FriendRequest, NNAInfo, NintendoPresenceV2,
PersistentNotification, PrincipalPreference,
},
response::ErrorCode,
structures::{any::Any, qresult::QResult},
},
};
use std::sync::atomic::Ordering::Relaxed;
use tokio::spawn;
use tokio::sync::RwLock;
use rnex_core::rmc::protocols::friends::{GameKey, MiiV2, PrincipalBasicInfo};
use rnex_core::PID;
use rnex_core::rmc::protocols::account_management::NintendoCreateAccountData;
use rnex_core::rmc::protocols::nintendo_notification::NintendoNotificationEvent;
use rnex_core::rmc::structures::RmcSerialize;
use rnex_core::rmc::structures::data::Data;
// Protocols served to an authenticated friends user.
define_rmc_proto!(
    proto FriendsUser{
        Secure,
        Friends
    }
);
// Protocols this server calls back on the user's console.
define_rmc_proto!(
    proto FriendRemote{
        NintendoNotification
    }
);
// Reduced protocol set for the guest account.
define_rmc_proto!(
    proto FriendsGuest{
        Secure,
        AccountManagement
    }
);
/// Profile data a friends user uploads after connecting:
/// NNID/Mii info plus current presence.
pub struct UserData {
    info: NNAInfo,
    presence: NintendoPresenceV2,
}
/// Per-connection state for an authenticated friends user.
#[rmc_struct(FriendsUser)]
pub struct FriendsUser {
    pub fm: Arc<FriendsManager>,
    pub addr: PRUDPSockAddr,
    pub pid: PID,
    // None until the client calls update_and_get_all_information.
    pub data: RwLock<Option<UserData>>,
    // PIDs this user currently sees as friends.
    pub current_friends: RwLock<Vec<PID>>,
    // Weak self-reference so the user can register itself with the manager.
    pub this: Weak<FriendsUser>,
    // Callback channel to the user's console (notifications).
    pub remote: RemoteFriendRemote,
}
/// Per-connection state for the guest account (no friends data).
#[rmc_struct(FriendsGuest)]
pub struct FriendsGuest {
    pub fm: Arc<FriendsManager>,
    pub addr: PRUDPSockAddr,
}
/// Shared state for the friends service: a connection-id counter and the
/// set of currently connected users (held weakly so disconnects free them).
pub struct FriendsManager {
    pub cid_counter: AtomicU32,
    pub users: RwLock<Vec<Weak<FriendsUser>>>,
}
impl FriendsManager {
    /// Hands out the next connection id (monotonically increasing;
    /// Relaxed ordering is enough for a plain counter).
    pub fn next_cid(&self) -> u32 {
        self.cid_counter.fetch_add(1, Relaxed)
    }
}
/// Builds a `FriendInfo` view of a connected user's data, with placeholder
/// comment/timestamps (every connected user is presented as a friend).
pub fn friend_info_from_user(data: &UserData) -> FriendInfo {
    FriendInfo {
        data: Data {},
        nna_info: data.info.clone(),
        presence: data.presence.clone(),
        comment: Comment {
            data: Data {},
            unk: 0,
            message: "haii =w=".to_string(),
            last_changed: KerberosDateTime::now(),
        },
        became_friends: KerberosDateTime::now(),
        last_online: KerberosDateTime::now(),
        unk: 0,
    }
}
impl Friends for FriendsUser {
    /// Initial sync call: stores the client's NNA info and presence, then
    /// returns preferences, comment, the friend list (a built-in dummy
    /// account plus every other connected user), empty request/blacklist
    /// sets and notification flags. Side effect: registers this user with
    /// the manager and notifies all other users (friend-added event for new
    /// pairs, presence update for existing ones).
    async fn update_and_get_all_information(
        &self,
        info: NNAInfo,
        presence: NintendoPresenceV2,
        _date_time: KerberosDateTime,
    ) -> Result<
        (
            PrincipalPreference,
            Comment,
            Vec<FriendInfo>,
            Vec<FriendRequest>,
            Vec<FriendRequest>,
            Vec<BlacklistedPrincipal>,
            bool,
            Vec<PersistentNotification>,
            bool,
        ),
        ErrorCode,
    > {
        println!("updating own data");
        let mut data = self.data.write().await;
        *data = Some(UserData { info, presence });
        let self_fr_info = friend_info_from_user(data.as_ref().unwrap());
        // Pre-serialize our own info/presence once; clones of the Any are
        // sent to every peer below.
        let Ok(any_self_fr_info) = Any::new(&self_fr_info) else {
            return Err(ErrorCode::RendezVous_ControlScriptFailure);
        };
        let Ok(any_self_presence) = Any::new(&self_fr_info.presence) else {
            return Err(ErrorCode::RendezVous_ControlScriptFailure);
        };
        // Release our own data lock before touching other users' locks.
        drop(data);
        // Friend list starts with a hard-coded dummy account (PID 101).
        let mut fr_list = vec![FriendInfo {
            data: Data{},
            became_friends: KerberosDateTime::now(),
            comment: Comment {
                data: Data{},
                last_changed: KerberosDateTime::now(),
                message: "I'm just a dummy account :3".to_string(),
                unk: 0,
            },
            last_online: KerberosDateTime::now(),
            nna_info: NNAInfo {
                data: Data{},
                principal_basic_info: PrincipalBasicInfo {
                    data: Data{},
                    pid: 101,
                    nnid: "dummy:3".to_string(),
                    mii: MiiV2{
                        data: Data{},
                        date_time: KerberosDateTime::now(),
                        name: "TheDummy".to_string(),
                        mii_data: hex::decode("030000402bd7c32986a771f2dc6b35e31da15e37ff7c0000391e6f006f006d0069000000000000000000000000004040001065033568641e2013661a611821640f0000290052485000000000000000000000000000000000000000000000e838").unwrap(),
                        unk: 0,
                        unk2: 0,
                    },
                    unk: 0
                },
                unk: 0,
                unk2: 0
            },
            presence: NintendoPresenceV2{
                data: Data{},
                changed_flags: 0,
                message: "".to_string(),
                app_data: vec![],
                game_key: GameKey{
                    data: Data{},
                    tid: 0x00050002101ce400,
                    version: 0x0
                },
                game_server_id: 0,
                is_online: true,
                gid: 0,
                pid: 101,
                unk: 0,
                unk2: 0,
                unk3: 0,
                unk4: 0,
                unk5: 0,
                unk6: 0,
                unk7: 0
            },
            unk: 0
        }];
        println!("acquiring user and current friends locks");
        let users = self.fm.users.read().await;
        // Hard cap of 100 live users on this server.
        if users.iter().filter(|u| u.upgrade().is_some()).count() >= 100 {
            return Err(ErrorCode::RendezVous_ConnectionFailure);
        }
        println!("started summing users");
        for u in users.deref().iter().filter_map(|u| u.upgrade()) {
            let data = u.data.read().await;
            // Skip peers that have not uploaded their data yet.
            let Some(inner_data) = data.as_ref() else {
                continue;
            };
            fr_list.push(friend_info_from_user(&inner_data));
            drop(data);
            let mut curr_friends = self.current_friends.write().await;
            curr_friends.push(u.pid);
            drop(curr_friends);
            let mut fr = u.current_friends.write().await;
            if !fr.contains(&self.pid) {
                // New friendship: notify the peer with a friend-added event
                // (event_type 30 — presumably "friend added"; unverified).
                fr.push(self.pid);
                drop(fr);
                let data = any_self_fr_info.clone();
                let u = u.clone();
                let sender = self.pid;
                spawn(async move {
                    u.remote
                        .process_nintendo_notification_event_1(NintendoNotificationEvent {
                            event_type: 30,
                            sender,
                            data,
                        })
                        .await;
                });
            } else {
                // Already friends: just push a presence update
                // (event_type 24 — presumably "presence changed"; unverified).
                let data = any_self_presence.clone();
                let u = u.clone();
                let sender = self.pid;
                spawn(async move {
                    u.remote
                        .process_nintendo_notification_event_2(NintendoNotificationEvent {
                            event_type: 24,
                            sender,
                            data,
                        })
                        .await;
                });
                drop(fr);
            }
        }
        println!("finished summing users");
        drop(users);
        println!("adding self to users");
        // Register ourselves so later joiners see us as a friend.
        let mut users = self.fm.users.write().await;
        users.push(self.this.clone());
        drop(users);
        println!("done...");
        Ok((
            PrincipalPreference {
                data: Data {},
                block_friend_request: false,
                show_online: false,
                show_playing_title: false,
            },
            Comment {
                data: Data {},
                last_changed: KerberosDateTime::now(),
                message: "".to_string(),
                unk: 0,
            },
            fr_list,
            vec![],
            vec![],
            vec![],
            false,
            vec![],
            false,
        ))
    }
async fn update_presence(&self, presence: NintendoPresenceV2) -> Result<(), ErrorCode> {
info!("user updated presence: {:?}", presence);
let mut data = self.data.write().await;
let Some(inner_data) = data.as_mut() else {
log::error!("unable to get presence data");
return Err(ErrorCode::RendezVous_PermissionDenied);
};
inner_data.presence = presence;
let Ok(any_self_fr_info) = Any::new(&inner_data.presence) else {
log::error!("unable to create presence any data holder");
return Err(ErrorCode::RendezVous_ControlScriptFailure);
};
drop(data);
let users = self.fm.users.read().await;
for u in users.deref().iter().filter_map(|u| u.upgrade()) {
info!("sending presence update");
u.remote
.process_nintendo_notification_event_2(NintendoNotificationEvent {
event_type: 24,
sender: self.pid,
data: any_self_fr_info.clone(),
})
.await;
}
drop(users);
Ok(())
}
    /// Stubbed: persistent notifications are accepted and silently
    /// discarded; the call always succeeds.
    async fn delete_persistent_notification(
        &self,
        _notifs: Vec<PersistentNotification>,
    ) -> Result<(), ErrorCode> {
        Ok(())
    }
    /// Returns the fixed settings-status byte 0xFF for every caller.
    async fn check_setting_status(&self) -> Result<u8, ErrorCode> {
        Ok(0xFF)
    }
async fn update_preference(&self, preference: PrincipalPreference) -> Result<(),ErrorCode> {
info!("user updated preference: {:?}", preference);
let any_presence: Any = Any::new(&preference).expect("out of memory");
let users = self.fm.users.read().await;
for u in users.deref().iter().filter_map(|u| u.upgrade()) {
info!("sending preference update");
u.remote
.process_nintendo_notification_event_2(NintendoNotificationEvent {
event_type: 23,
sender: self.pid,
data: any_presence.clone(),
})
.await;
}
drop(users);
Ok(())
}
}
/// HMAC-MD5, used to derive the password hash returned to the console by
/// `nintendo_create_account`.
type HMacMd5 = hmac::Hmac<md5::Md5>;
impl Secure for FriendsUser {
    /// Registers this user's station URLs with the secure server, enforcing
    /// the 100-live-user cap, and returns a fresh connection id plus the
    /// public station URL.
    async fn register(
        &self,
        station_urls: Vec<StationUrl>,
    ) -> Result<(QResult, u32, StationUrl), ErrorCode> {
        let cid = self.fm.next_cid();
        // Dead weak references do not count toward the cap.
        let users = self.fm.users.read().await;
        if users.iter().filter(|u| u.upgrade().is_some()).count() >= 100 {
            return Err(ErrorCode::RendezVous_ConnectionFailure);
        }
        // Avoid the previous `[0]` index, which panicked whenever the
        // helper produced no URLs; report an error to the caller instead.
        let urls = get_station_urls(&station_urls, self.addr, self.pid, cid).await?;
        let first = urls.first().cloned().ok_or(ErrorCode::Core_InvalidArgument)?;
        Ok((QResult::success(ErrorCode::Core_Unknown), cid, first))
    }
    /// `registerEx` ignores the extra login payload and defers to
    /// `register`.
    async fn register_ex(
        &self,
        station_urls: Vec<StationUrl>,
        _data: Any,
    ) -> Result<(QResult, u32, StationUrl), ErrorCode> {
        info!("register");
        self.register(station_urls).await
    }
    /// URL replacement is not supported on the friends server.
    async fn replace_url(&self, _target: StationUrl, _dest: StationUrl) -> Result<(), ErrorCode> {
        Err(ErrorCode::Core_NotImplemented)
    }
}
impl Secure for FriendsGuest {
    /// Guest registration: allocates a connection id and returns the public
    /// station URL, using the fixed guest pid 100. Guests are not subject
    /// to the live-user cap.
    async fn register(
        &self,
        station_urls: Vec<StationUrl>,
    ) -> Result<(QResult, u32, StationUrl), ErrorCode> {
        let cid = self.fm.next_cid();
        // Avoid the previous `[0]` index, which panicked whenever the
        // helper produced no URLs; report an error to the caller instead.
        let urls = get_station_urls(&station_urls, self.addr, 100, cid).await?;
        let first = urls.first().cloned().ok_or(ErrorCode::Core_InvalidArgument)?;
        Ok((QResult::success(ErrorCode::Core_Unknown), cid, first))
    }
    /// `registerEx` ignores the extra login payload and defers to
    /// `register`.
    async fn register_ex(
        &self,
        station_urls: Vec<StationUrl>,
        _data: Any,
    ) -> Result<(QResult, u32, StationUrl), ErrorCode> {
        info!("register");
        self.register(station_urls).await
    }
    /// URL replacement is not supported on the friends server.
    async fn replace_url(&self, _target: StationUrl, _dest: StationUrl) -> Result<(), ErrorCode> {
        Err(ErrorCode::Core_NotImplemented)
    }
}
impl AccountManagement for FriendsGuest {
    /// Handles the console's account-creation RPC. Only the
    /// `NintendoCreateAccountData` payload is understood; every other
    /// payload type is rejected as unimplemented.
    async fn nintendo_create_account(
        &self,
        principal_name: String,
        key: String,
        groups: u32,
        email: String,
        auth_data: Any,
    ) -> Result<(PID, String), ErrorCode> {
        println!("{}, {}, {}, {}", principal_name, key, groups, email);
        if auth_data.name != "NintendoCreateAccountData" {
            return Err(ErrorCode::Core_NotImplemented);
        }
        let mut cursor = Cursor::new(&auth_data.data);
        let Ok(account_data) = NintendoCreateAccountData::deserialize(&mut cursor) else {
            return Err(ErrorCode::Authentication_InvalidParam);
        };
        let pid = account_data.nna_info.principal_basic_info.pid;
        info!("create account: {}", pid);
        // Password hash = HMAC-MD5 keyed with the client-supplied key over
        // the raw PID bytes, hex encoded.
        let Ok(mut hasher) = HMacMd5::new_from_slice(key.as_bytes()) else {
            return Err(ErrorCode::Authentication_InvalidParam);
        };
        hasher
            .write_all(bytes_of(&pid))
            .expect("failed to write to hmac???");
        let digest = hasher.finalize().into_bytes();
        Ok((pid, hex::encode(digest)))
    }
}

View file

@ -1,519 +0,0 @@
use log::info;
use rand::random;
use rnex_core::PID;
use rnex_core::kerberos::KerberosDateTime;
use rnex_core::nex::user::User;
use rnex_core::rmc::protocols::notifications::notification_types::{
HOST_CHANGED, OWNERSHIP_CHANGED,
};
use rnex_core::rmc::protocols::notifications::{NotificationEvent, RemoteNotification};
use rnex_core::rmc::response::ErrorCode;
use rnex_core::rmc::response::ErrorCode::{Core_InvalidArgument, RendezVous_SessionVoid};
use rnex_core::rmc::structures::matchmake::gathering_flags::PERSISTENT_GATHERING;
use rnex_core::rmc::structures::matchmake::{
Gathering, MatchmakeParam, MatchmakeSession, MatchmakeSessionSearchCriteria,
};
use rnex_core::rmc::structures::variant::Variant;
use std::collections::HashMap;
use std::str::FromStr;
use std::sync::atomic::AtomicU32;
use std::sync::atomic::Ordering::Relaxed;
use std::sync::{Arc, Weak};
use std::time::Duration;
use tokio::sync::{Mutex, RwLock};
use tokio::time::sleep;
/// Server-wide matchmaking state shared by every connected user.
pub struct MatchmakeManager {
    //pub gid_counter: AtomicU32,
    // Live gatherings keyed by gathering id.
    pub sessions: RwLock<HashMap<u32, Arc<Mutex<ExtendedMatchmakeSession>>>>,
    // Monotonic source of RendezVous connection ids.
    pub rv_cid_counter: AtomicU32,
    // Connected users keyed by connection id; weak so a dropped connection
    // vanishes from the map logically.
    pub users: RwLock<HashMap<u32, Weak<User>>>,
    // The same users, keyed by principal id.
    pub users_by_pid: RwLock<HashMap<u32, Weak<User>>>,
}
impl MatchmakeManager {
    /// Allocates a gathering id. Ids are random rather than sequential (the
    /// counter variant is kept commented out for reference).
    pub fn next_gid(&self) -> u32 {
        random()
        //self.gid_counter.fetch_add(1, Relaxed)
    }
    /// Allocates the next RendezVous connection id.
    pub fn next_cid(&self) -> u32 {
        self.rv_cid_counter.fetch_add(1, Relaxed)
    }
    /// Looks a session up by gathering id, cloning the `Arc` so the map
    /// lock is released before the caller locks the session itself.
    pub async fn get_session(
        &self,
        gid: u32,
    ) -> Result<Arc<Mutex<ExtendedMatchmakeSession>>, ErrorCode> {
        let sessions = self.sessions.read().await;
        let Some(session) = sessions.get(&gid) else {
            return Err(RendezVous_SessionVoid);
        };
        let session = session.clone();
        drop(sessions);
        Ok(session)
    }
    /// Drops every session that is no longer reachable.
    ///
    /// The gid list is snapshotted up front and each session is locked only
    /// after the map lock is released, so this cannot deadlock against
    /// request handlers that lock a session and then the map. (The previous
    /// implementation re-read the map by positional index on every
    /// iteration, which was O(n^2) and could skip or re-visit entries when
    /// the map changed concurrently.)
    async fn garbage_collect(&self) {
        info!("running rnex garbage collector over all sessions and users");
        let gids: Vec<u32> = {
            let sessions = self.sessions.read().await;
            sessions.keys().copied().collect()
        };
        let mut to_be_deleted_gids = Vec::new();
        for gid in gids {
            // Re-fetch per gid: the entry may have been removed meanwhile.
            let sessions = self.sessions.read().await;
            let session = sessions.get(&gid).cloned();
            drop(sessions);
            let Some(session) = session else {
                continue;
            };
            let session = session.lock().await;
            if !session.is_reachable() {
                to_be_deleted_gids.push(gid);
            }
        }
        let mut sessions = self.sessions.write().await;
        for gid in to_be_deleted_gids {
            sessions.remove(&gid);
        }
    }
    /// Spawns a background task that garbage collects every 30 minutes for
    /// as long as the manager is still alive.
    pub async fn initialize_garbage_collect_thread(this: Weak<Self>) {
        tokio::spawn(async move {
            while let Some(this) = this.upgrade() {
                this.garbage_collect().await;
                // every 30 minutes
                sleep(Duration::from_secs(60 * 30)).await;
            }
        });
    }
}
/// A wire-level `MatchmakeSession` plus the live connections currently
/// participating in it.
#[derive(Default, Debug)]
pub struct ExtendedMatchmakeSession {
    // Wire-visible session state.
    pub session: MatchmakeSession,
    // Weak refs to participants; dead entries are filtered out when read.
    pub connected_players: Vec<Weak<User>>,
}
/// Parses a `"lo,hi"` pair into two `T`s. Returns `None` when there is no
/// comma or when either half fails to parse.
fn read_bounds_string<T: FromStr>(str: &str) -> Option<(T, T)> {
    let (low, high) = str.split_once(",")?;
    let low = low.parse::<T>().ok()?;
    let high = high.parse::<T>().ok()?;
    Some((low, high))
}
/// Matches `compare` against a search-criteria string: an inclusive
/// `"lo,hi"` range, a single exact value, or `""` (which matches anything).
/// Returns `None` when the string is non-empty but unparseable.
fn check_bounds_str<T: FromStr + PartialOrd>(compare: T, str: &str) -> Option<bool> {
    // Inclusive "lo,hi" range?
    if let Some((low, high)) = str.split_once(",") {
        if let (Ok(low), Ok(high)) = (low.parse::<T>(), high.parse::<T>()) {
            return Some(low <= compare && compare <= high);
        }
    }
    // Single exact value?
    if let Ok(exact) = str.parse::<T>() {
        return Some(exact == compare);
    }
    // An empty criteria string matches everything.
    if str.is_empty() {
        return Some(true);
    }
    None
}
/// Delivers `notification_event` to each player in `players`, one at a
/// time and in slice order.
pub async fn broadcast_notification<T: AsRef<User>>(
    players: &[T],
    notification_event: &NotificationEvent,
) {
    for entry in players.iter().map(|p| p.as_ref()) {
        entry
            .remote
            .process_notification_event(notification_event.clone())
            .await;
    }
}
impl ExtendedMatchmakeSession {
#[inline(always)]
pub fn get_active_players(&self) -> Vec<Arc<User>> {
self.connected_players
.iter()
.filter_map(|u| u.upgrade())
.collect()
}
    /// Sends `notification_event` to every still-connected player in this
    /// session (dead weak references are skipped).
    #[inline(always)]
    pub async fn broadcast_notification(&self, notification_event: &NotificationEvent) {
        broadcast_notification(&self.get_active_players(), notification_event).await;
    }
    /// Builds a tracked session from a client-supplied `MatchmakeSession`,
    /// stamping in the server-assigned gid, making the host both owner and
    /// host, and generating a fresh 32-byte random session key.
    ///
    /// NOTE(review): if `host` is already dead this returns
    /// `Default::default()` (gid 0, empty session) rather than an error —
    /// confirm callers tolerate that. Declared `async` although it never
    /// awaits.
    pub async fn from_matchmake_session(
        gid: u32,
        session: MatchmakeSession,
        host: &Weak<User>,
    ) -> Self {
        let Some(host) = host.upgrade() else {
            return Default::default();
        };
        cfg_if::cfg_if! {
            if #[cfg(feature = "v3-5-0")]{
                // Newer protocol builds additionally reset the creation
                // timestamp, preseed the matchmake params ("@SR", "@GIR")
                // and clear the system-password flag.
                let mm_session = MatchmakeSession {
                    gathering: Gathering {
                        self_gid: gid,
                        owner_pid: host.pid,
                        host_pid: host.pid,
                        ..session.gathering.clone()
                    },
                    datetime: KerberosDateTime::now(),
                    session_key: (0..32).map(|_| random()).collect(),
                    matchmake_param: MatchmakeParam {
                        params: vec![
                            ("@SR".to_owned(), Variant::Bool(true)),
                            ("@GIR".to_owned(), Variant::SInt64(3)),
                        ],
                    },
                    system_password_enabled: false,
                    ..session
                };
                return Self {
                    session: mm_session,
                    connected_players: Default::default(),
                }
            } else {
                let mm_session = MatchmakeSession {
                    gathering: Gathering {
                        self_gid: gid,
                        owner_pid: host.pid,
                        host_pid: host.pid,
                        ..session.gathering.clone()
                    },
                    session_key: (0..32).map(|_| random()).collect(),
                    ..session
                };
                return Self {
                    session: mm_session,
                    connected_players: Default::default(),
                }
            }
        }
    }
pub async fn add_players(&mut self, conns: &[Weak<User>], join_msg: String) {
let Some(initiating_user) = conns[0].upgrade() else {
return;
};
let initiating_pid = initiating_user.pid;
let old_particip = self.connected_players.clone();
for conn in conns {
self.connected_players.push(conn.clone());
}
self.session.participation_count = self.connected_players.len() as u32;
for other_connection in &conns[1..] {
let Some(other_conn) = other_connection.upgrade() else {
continue;
};
let other_pid = other_conn.pid;
/*if other_pid == self.session.gathering.owner_pid &&
joining_pid == self.session.gathering.owner_pid{
continue;
}*/
other_conn
.remote
.process_notification_event(NotificationEvent {
pid_source: initiating_pid,
notif_type: 122000,
param_1: self.session.gathering.self_gid as PID,
param_2: other_pid,
str_param: "".into(),
#[cfg(feature = "third-notif-param")]
param_3: 0,
})
.await;
}
let list_of_connected_pids: Vec<_> = self
.connected_players
.iter()
.filter_map(|p| p.upgrade())
.map(|p| p.pid)
.collect();
for other_connection in conns {
let Some(other_conn) = other_connection.upgrade() else {
continue;
};
// let other_pid = other_conn.pid;
/*if other_pid == self.session.gathering.owner_pid &&
joining_pid == self.session.gathering.owner_pid{
continue;
}*/
for pid in &list_of_connected_pids {
other_conn
.remote
.process_notification_event(NotificationEvent {
pid_source: initiating_pid,
notif_type: 3001,
param_1: self.session.gathering.self_gid as PID,
param_2: *pid,
str_param: join_msg.clone(),
#[cfg(feature = "third-notif-param")]
param_3: self.connected_players.len() as _,
})
.await;
}
}
for old_conns in &old_particip {
let Some(old_conns) = old_conns.upgrade() else {
continue;
};
/*if old_conns.pid != self.session.gathering.host_pid {
continue;
}*/
for new_conn_pid in conns.iter().filter_map(Weak::upgrade).map(|c| c.pid) {
old_conns
.remote
.process_notification_event(NotificationEvent {
pid_source: initiating_pid,
notif_type: 3001,
param_1: self.session.gathering.self_gid as PID,
param_2: new_conn_pid,
str_param: join_msg.clone(),
#[cfg(feature = "third-notif-param")]
param_3: self.connected_players.len() as _,
})
.await;
}
}
}
pub fn has_active_players(&self) -> bool {
self.connected_players
.iter()
.filter(|v| v.upgrade().is_some())
.count()
!= 0
}
#[inline]
pub fn is_reachable(&self) -> bool {
(if self.session.gathering.flags & PERSISTENT_GATHERING != 0 {
if self.has_active_players() {
true
} else {
self.session.open_participation
}
} else {
self.has_active_players()
}) & self.has_active_players()
}
    /// A session can be joined only while it is reachable and participation
    /// is open.
    #[inline]
    pub fn is_joinable(&self) -> bool {
        self.is_reachable() && self.session.open_participation
    }
    /// Checks this session against one client-supplied search criteria set.
    /// Returns `Ok(false)` on the first failing criterion,
    /// `Err(Core_InvalidArgument)` when a criteria string is unparseable.
    pub fn matches_criteria(
        &self,
        search_criteria: &MatchmakeSessionSearchCriteria,
    ) -> Result<bool, ErrorCode> {
        // todo: implement the rest of the search criteria
        // Capacity check. NOTE(review): `connected_players.len()` counts
        // dead weak references too — confirm whether pruned count is wanted.
        if search_criteria.vacant_only {
            if (self.connected_players.len() as u16 + search_criteria.vacant_participants)
                > self.session.gathering.maximum_participants
            {
                return Ok(false);
            }
        }
        if search_criteria.exclude_locked {
            if !self.session.open_participation {
                return Ok(false);
            }
        }
        // Password-based exclusions exist only on v3.5.0+ protocol builds.
        cfg_if::cfg_if! {
            if #[cfg(feature = "v3-5-0")]{
                if search_criteria.exclude_system_password_set {
                    if self.session.system_password_enabled {
                        return Ok(false);
                    }
                }
                if search_criteria.exclude_user_password_set {
                    if self.session.user_password_enabled {
                        return Ok(false);
                    }
                }
            }
        }
        // Min/max participant criteria are "lo,hi" / exact / empty strings.
        if !check_bounds_str(
            self.session.gathering.minimum_participants,
            &search_criteria.minimum_participants,
        )
        .ok_or(Core_InvalidArgument)?
        {
            return Ok(false);
        }
        if !check_bounds_str(
            self.session.gathering.maximum_participants,
            &search_criteria.maximum_participants,
        )
        .ok_or(Core_InvalidArgument)?
        {
            return Ok(false);
        }
        // Game mode and matchmake system type must match exactly.
        let game_mode: u32 = search_criteria
            .game_mode
            .parse()
            .map_err(|_| Core_InvalidArgument)?;
        if self.session.gamemode != game_mode {
            return Ok(false);
        }
        let mm_sys_type: u32 = search_criteria
            .matchmake_system_type
            .parse()
            .map_err(|_| Core_InvalidArgument)?;
        if self.session.matchmake_system_type != mm_sys_type {
            return Ok(false);
        }
        // Splatoon-specific attribute filters. NOTE(review): these reject a
        // session when the criteria attrib set *contains* the session's
        // attribute (a blocklist-style match) — confirm this is intended.
        #[cfg(feature = "splatoon")]
        {
            if search_criteria.attribs.get(0).is_some_and(|s| {
                self.session
                    .attributes
                    .get(0)
                    .is_some_and(|a| s.0.contains(a))
            }) {
                return Ok(false);
            }
            if search_criteria.attribs.get(2).is_some_and(|s| {
                self.session
                    .attributes
                    .get(2)
                    .is_some_and(|a| s.0.contains(a))
            }) {
                return Ok(false);
            }
            if search_criteria.attribs.get(3).is_some_and(|s| {
                self.session
                    .attributes
                    .get(3)
                    .is_some_and(|a| s.0.contains(a))
            }) {
                return Ok(false);
            }
        }
        Ok(true)
    }
pub async fn migrate_ownership(&mut self, initiator_pid: PID) -> Result<(), ErrorCode> {
let players: Vec<_> = self
.connected_players
.iter()
.filter_map(|p| p.upgrade())
.collect();
let Some(new_owner) = players
.iter()
.find(|p| p.pid != self.session.gathering.owner_pid)
else {
self.session.gathering.owner_pid = 0;
return Ok(());
};
self.session.gathering.owner_pid = new_owner.pid;
self.broadcast_notification(&NotificationEvent {
pid_source: initiator_pid,
notif_type: OWNERSHIP_CHANGED,
param_1: self.session.gathering.self_gid as PID,
param_2: new_owner.pid,
..Default::default()
})
.await;
Ok(())
}
pub async fn migrate_host(&mut self, initiator_pid: PID) -> Result<(), ErrorCode> {
// let players: Vec<_> = self.connected_players.iter().filter_map(|p| p.upgrade()).collect();
self.session.gathering.host_pid = self.session.gathering.owner_pid;
self.broadcast_notification(&NotificationEvent {
pid_source: initiator_pid,
notif_type: HOST_CHANGED,
param_1: self.session.gathering.self_gid as PID,
..Default::default()
})
.await;
Ok(())
}
pub async fn remove_player_from_session(
&mut self,
pid: PID,
message: &str,
) -> Result<(), ErrorCode> {
self.connected_players
.retain(|u| u.upgrade().is_some_and(|u| u.pid != pid));
self.session.participation_count =
(self.connected_players.len() & u32::MAX as usize) as u32;
if pid == self.session.gathering.owner_pid {
self.migrate_ownership(pid).await?;
}
if pid == self.session.gathering.host_pid {
self.migrate_host(pid).await?;
}
// todo: support DisconnectChangeOwner
// todo: finish the rest of this
for player in self.connected_players.iter().filter_map(|p| p.upgrade()) {
player
.remote
.process_notification_event(NotificationEvent {
notif_type: 3008,
pid_source: pid,
param_1: self.session.gathering.self_gid as PID,
param_2: pid,
str_param: message.to_owned(),
..Default::default()
})
.await;
}
Ok(())
}
}

View file

@ -1,22 +0,0 @@
use cfg_if::cfg_if;
pub mod account;
pub mod auth_handler;
pub mod common;
// The friends server and the game (matchmaking) server are mutually
// exclusive builds: the `friends` feature swaps which handler modules are
// compiled in.
cfg_if! {
    if #[cfg(feature = "friends")]{
        pub mod friends_handler;
    } else {
        pub mod matchmake;
        pub mod remote_console;
        pub mod user;
    }
}
// Datastore support (and its S3 presigner) is optional.
cfg_if! {
    if #[cfg(feature = "datastore")] {
        pub mod s3presigner;
        pub mod datastore;
    }
}

View file

@ -1,117 +0,0 @@
use aws_sdk_s3::presigning::PresigningConfig;
use base64::{engine::general_purpose, Engine as _};
use hmac::{Hmac, Mac};
use sha2::{Sha256, Digest};
use chrono::{Utc, Duration};
use serde_json::json;
use rnex_core::executables::common::RNEX_DATASTORE_S3_ENDPOINT;
/// Hand-rolled AWS Signature Version 4 presigner for S3-compatible object
/// storage (POST policies and GET URLs).
pub struct S3Presigner {
    // Endpoint host, stored without a trailing slash.
    endpoint: String,
    // Target bucket name.
    bucket: String,
}
impl S3Presigner {
    /// Builds a presigner for `bucket` behind `endpoint` (trailing slashes
    /// stripped). NOTE(review): declared `async` although it performs no
    /// awaits — presumably for call-site symmetry; confirm.
    pub async fn new(endpoint: &str, bucket: String) -> Self {
        Self {
            endpoint: endpoint.trim_end_matches('/').to_string(),
            bucket,
        }
    }
pub async fn generate_presigned_post(&self, key: &str) -> (String, Vec<(String, String)>) {
let access_key = std::env::var("AWS_ACCESS_KEY_ID").expect("Missing Access Key");
let secret_key = std::env::var("AWS_SECRET_ACCESS_KEY").expect("Missing Secret Key");
let region = "us-east-1"; // hardcoded because its the default region for most s3 clones
let date_short = Utc::now().format("%Y%m%d").to_string();
let date_full = Utc::now().format("%Y%m%dT%H%M%SZ").to_string();
let expiration = (Utc::now() + Duration::minutes(15)).format("%Y-%m-%dT%H:%M:%SZ").to_string();
let credential = format!("{}/{}/{}/s3/aws4_request", access_key, date_short, region);
let policy_json = json!({
"expiration": expiration,
"conditions": [
{"bucket": self.bucket},
["starts-with", "$key", key],
{"x-amz-credential": credential},
{"x-amz-algorithm": "AWS4-HMAC-SHA256"},
{"x-amz-date": date_full}
]
});
let policy_base64 = general_purpose::STANDARD.encode(policy_json.to_string());
let signature = self.calculate_signature(&secret_key, &date_short, region, &policy_base64);
let mut fields = vec![
("key".to_string(), key.to_string()),
("X-Amz-Algorithm".to_string(), "AWS4-HMAC-SHA256".to_string()),
("X-Amz-Credential".to_string(), credential),
("X-Amz-Date".to_string(), date_full),
("Policy".to_string(), policy_base64),
("X-Amz-Signature".to_string(), signature),
];
let url = format!("https://{}/{}", *RNEX_DATASTORE_S3_ENDPOINT, self.bucket);
(url, fields)
}
    /// Builds a SigV4 presigned GET URL for `key`, valid for 900 seconds,
    /// with only the `host` header signed and an unsigned payload.
    ///
    /// NOTE(review): `key` is interpolated into the canonical request and
    /// final URL without URI encoding, and the query string relies on being
    /// already alphabetically ordered — confirm both against the SigV4
    /// canonical-request rules for keys with special characters.
    ///
    /// # Panics
    /// Panics if `AWS_ACCESS_KEY_ID` / `AWS_SECRET_ACCESS_KEY` are unset.
    pub fn generate_presigned_get(&self, key: &str) -> String {
        let access_key = std::env::var("AWS_ACCESS_KEY_ID").expect("Missing Access Key");
        let secret_key = std::env::var("AWS_SECRET_ACCESS_KEY").expect("Missing Secret Key");
        let region = "us-east-1";
        let date_short = Utc::now().format("%Y%m%d").to_string();
        let date_full = Utc::now().format("%Y%m%dT%H%M%SZ").to_string();
        let credential_scope = format!("{}/{}/s3/aws4_request", date_short, region);
        // Trailing `\` in the literal strips the newline and the next
        // line's leading whitespace, so this is a single-line query string.
        let query_string = format!(
            "X-Amz-Algorithm=AWS4-HMAC-SHA256&\
            X-Amz-Credential={}%2F{}&\
            X-Amz-Date={}&\
            X-Amz-Expires=900&\
            X-Amz-SignedHeaders=host",
            access_key,
            urlencoding::encode(&credential_scope),
            date_full
        );
        let canonical_request = format!(
            "GET\n/{}/{}\n{}\nhost:{}\n\nhost\nUNSIGNED-PAYLOAD",
            self.bucket, key, query_string, *RNEX_DATASTORE_S3_ENDPOINT
        );
        let hashed_request = hex::encode(Sha256::digest(canonical_request.as_bytes()));
        let string_to_sign = format!(
            "AWS4-HMAC-SHA256\n{}\n{}\n{}",
            date_full, credential_scope, hashed_request
        );
        // SigV4 key derivation: date -> region -> service -> request.
        let k_date = self.hmac_sha256(format!("AWS4{}", secret_key).as_bytes(), &date_short);
        let k_region = self.hmac_sha256(&k_date, region);
        let k_service = self.hmac_sha256(&k_region, "s3");
        let k_signing = self.hmac_sha256(&k_service, "aws4_request");
        let signature = hex::encode(self.hmac_sha256(&k_signing, &string_to_sign));
        format!(
            "https://{}/{}/{}?{}&X-Amz-Signature={}",
            *RNEX_DATASTORE_S3_ENDPOINT, self.bucket, key, query_string, signature
        )
    }
fn calculate_signature(&self, secret: &str, date: &str, region: &str, policy: &str) -> String {
let k_date = self.hmac_sha256(format!("AWS4{}", secret).as_bytes(), date);
let k_region = self.hmac_sha256(&k_date, region);
let k_service = self.hmac_sha256(&k_region, "s3");
let k_signing = self.hmac_sha256(&k_service, "aws4_request");
hex::encode(self.hmac_sha256(&k_signing, policy))
}
    /// One HMAC-SHA256 round over `data` with `key`, returned as raw bytes.
    fn hmac_sha256(&self, key: &[u8], data: &str) -> Vec<u8> {
        let mut mac = Hmac::<Sha256>::new_from_slice(key).expect("HMAC can take key of any size");
        mac.update(data.as_bytes());
        mac.finalize().into_bytes().to_vec()
    }
}

View file

@ -1,922 +0,0 @@
use rnex_core::PID;
use rnex_core::define_rmc_proto;
use rnex_core::kerberos::KerberosDateTime;
use rnex_core::nex::common::get_station_urls;
use rnex_core::nex::matchmake::{ExtendedMatchmakeSession, MatchmakeManager};
use rnex_core::nex::remote_console::RemoteConsole;
use rnex_core::prudp::station_url::StationUrl;
use rnex_core::prudp::station_url::UrlOptions::{
Address, NatFiltering, NatMapping, Port, RVConnectionID,
};
use rnex_core::rmc::protocols::matchmake::{
Matchmake, RawMatchmake, RawMatchmakeInfo, RemoteMatchmake,
};
use rnex_core::rmc::protocols::matchmake_ext::{
MatchmakeExt, RawMatchmakeExt, RawMatchmakeExtInfo, RemoteMatchmakeExt,
};
use rnex_core::rmc::protocols::matchmake_extension::{
MatchmakeExtension, RawMatchmakeExtension, RawMatchmakeExtensionInfo, RemoteMatchmakeExtension,
};
use rnex_core::rmc::protocols::nat_traversal::{
NatTraversal, RawNatTraversal, RawNatTraversalInfo, RemoteNatTraversal,
RemoteNatTraversalConsole,
};
use rnex_core::rmc::protocols::notifications::notification_types::{
END_GATHERING, REQUEST_JOIN_GATHERING,
};
use rnex_core::rmc::protocols::ranking::{Ranking, RawRanking, RawRankingInfo, RemoteRanking};
use rnex_core::rmc::protocols::secure::{RawSecure, RawSecureInfo, RemoteSecure, Secure};
use rnex_core::rmc::protocols::util::{RawUtility, RawUtilityInfo, RemoteUtility, Utility};
use rnex_core::rmc::response::ErrorCode;
use rnex_core::rmc::structures::any::Any;
use rnex_core::rmc::structures::matchmake::{
AutoMatchmakeParam, CreateMatchmakeSessionParam, JoinMatchmakeSessionParam, MatchmakeSession,
};
use serde::{Deserialize, Serialize};
use std::env;
use std::str::FromStr;
use cfg_if::cfg_if;
use log::{error, info};
use macros::rmc_struct;
use rnex_core::prudp::socket_addr::PRUDPSockAddr;
use rnex_core::rmc::protocols::notifications::{NotificationEvent, RemoteNotification};
use rnex_core::rmc::protocols::ranking::{
CompetitionRankingGetParam, CompetitionRankingScoreData, CompetitionRankingScoreInfo,
};
use rnex_core::rmc::response::ErrorCode::{Core_InvalidArgument, RendezVous_AccountExpired};
use rnex_core::rmc::structures::qbuffer::QBuffer;
use rnex_core::rmc::structures::qresult::QResult;
use rnex_core::rmc::structures::ranking::UploadCompetitionData;
use std::sync::{Arc, Weak};
use tokio::sync::{Mutex, RwLock};
use crate::rmc::structures::matchmake::MatchmakeSessionSearchCriteria;
// The user protocol is assembled from the individual RMC protocol traits;
// the DataStore protocol is only wired in when the feature is enabled.
cfg_if! {
    if #[cfg(feature = "datastore")] {
        use rnex_core::rmc::protocols::datastore::{DataStore, RawDataStore, RawDataStoreInfo, RemoteDataStore};
        define_rmc_proto!(
            proto UserProtocol{
                Secure,
                MatchmakeExtension,
                MatchmakeExt,
                Matchmake,
                NatTraversal,
                Ranking,
                Utility,
                DataStore
            }
        );
    } else {
        define_rmc_proto!(
            proto UserProtocol{
                Secure,
                MatchmakeExtension,
                MatchmakeExt,
                Matchmake,
                NatTraversal,
                Utility,
                Ranking
            }
        );
    }
}
/// One authenticated, connected game client on the secure server.
#[rmc_struct(UserProtocol)]
pub struct User {
    // NEX principal id of the connected account.
    pub pid: PID,
    // PRUDP address the client connected from.
    pub ip: PRUDPSockAddr,
    // Weak self-reference handed out to sessions and the lookup maps.
    pub this: Weak<User>,
    // Outbound RMC channel back to the console.
    pub remote: RemoteConsole,
    // Station URLs registered via `Secure::register`.
    pub station_url: RwLock<Vec<StationUrl>>,
    // Shared server-wide matchmaking state.
    pub matchmake_manager: Arc<MatchmakeManager>,
}
impl Secure for User {
async fn register(
&self,
station_urls: Vec<StationUrl>,
) -> Result<(QResult, u32, StationUrl), ErrorCode> {
let cid = self.matchmake_manager.next_cid();
println!("{:?}", station_urls);
let mut users = self.matchmake_manager.users.write().await;
users.insert(cid, self.this.clone());
drop(users);
let mut users = self.matchmake_manager.users_by_pid.write().await;
users.insert(self.pid, self.this.clone());
drop(users);
let stations = get_station_urls(&station_urls, self.ip, self.pid, cid).await?;
let first = stations.first().unwrap().clone();
let mut lock = self.station_url.write().await;
*lock = stations;
drop(lock);
let result = QResult::success(ErrorCode::Core_Unknown);
Ok((result, cid, first))
}
    /// `registerEx` ignores the extra login payload and behaves exactly
    /// like `register`.
    async fn register_ex(
        &self,
        station_urls: Vec<StationUrl>,
        _data: Any,
    ) -> Result<(QResult, u32, StationUrl), ErrorCode> {
        self.register(station_urls).await
    }
    /// Replaces the stored station URL whose Address and Port options both
    /// match `target_url` with `dest`. Errors with `Core_InvalidArgument`
    /// when the target lacks either option or no stored URL matches.
    async fn replace_url(&self, target_url: StationUrl, dest: StationUrl) -> Result<(), ErrorCode> {
        let mut lock = self.station_url.write().await;
        // Matching considers only the Address and Port options; every other
        // option on the stored URLs is ignored.
        let Some(target_addr) = target_url.options.iter().find(|v| matches!(v, Address(_))) else {
            return Err(ErrorCode::Core_InvalidArgument);
        };
        let Some(target_port) = target_url.options.iter().find(|v| matches!(v, Port(_))) else {
            return Err(ErrorCode::Core_InvalidArgument);
        };
        let Some(replacement_target) = lock.iter_mut().find(|url| {
            url.options.iter().any(|o| o == target_addr)
                && url.options.iter().any(|o| o == target_port)
        }) else {
            return Err(ErrorCode::Core_InvalidArgument);
        };
        *replacement_target = dest;
        drop(lock);
        Ok(())
    }
}
impl MatchmakeExtension for User {
async fn close_participation(&self, gid: u32) -> Result<(), ErrorCode> {
let session = self.matchmake_manager.get_session(gid).await?;
let mut session = session.lock().await;
session.session.open_participation = false;
Ok(())
}
async fn open_participation(&self, gid: u32) -> Result<(), ErrorCode> {
let session = self.matchmake_manager.get_session(gid).await?;
let mut session = session.lock().await;
session.session.open_participation = true;
Ok(())
}
    /// Stubbed: playing-session lookups always report an empty list.
    async fn get_playing_session(&self, _pids: Vec<u32>) -> Result<Vec<()>, ErrorCode> {
        Ok(Vec::new())
    }
#[cfg(feature = "v3-5-0")]
async fn update_progress_score(&self, gid: u32, progress: u8) -> Result<(), ErrorCode> {
let session = self.matchmake_manager.get_session(gid).await?;
let mut session = session.lock().await;
session.session.progress_score = progress;
Ok(())
}
    /// Creates a new gathering from the client's template, carries over any
    /// requested participants from an old gathering, sends the join
    /// notifications, and publishes the session in the manager's map.
    async fn create_matchmake_session_with_param(
        &self,
        create_session_param: CreateMatchmakeSessionParam,
    ) -> Result<MatchmakeSession, ErrorCode> {
        println!("{:?}", create_session_param);
        let gid = self.matchmake_manager.next_gid();
        let mut new_session = ExtendedMatchmakeSession::from_matchmake_session(
            gid,
            create_session_param.matchmake_session,
            &self.this.clone(),
        )
        .await;
        // The creator always joins; listed additional participants are
        // pulled over from the old gathering when it still exists.
        let mut joining_players = vec![self.this.clone()];
        // NOTE(review): this read lock is never used below — presumably it
        // guards against concurrent user-map changes; confirm it is needed.
        let users = self.matchmake_manager.users.read().await;
        if let Ok(old_gathering) = self
            .matchmake_manager
            .get_session(create_session_param.gid_for_participation_check)
            .await
        {
            let old_gathering = old_gathering.lock().await;
            let players = old_gathering
                .connected_players
                .iter()
                .filter_map(|v| v.upgrade())
                .filter(|u| {
                    create_session_param
                        .additional_participants
                        .iter()
                        .any(|p| *p == u.pid)
                });
            for player in players {
                joining_players.push(Arc::downgrade(&player));
            }
        }
        drop(users);
        new_session.session.participation_count = create_session_param.participation_count as u32;
        new_session
            .add_players(&joining_players, create_session_param.join_message)
            .await;
        // Snapshot before handing the session to the shared map.
        let session = new_session.session.clone();
        let mut sessions = self.matchmake_manager.sessions.write().await;
        sessions.insert(gid, Arc::new(Mutex::new(new_session)));
        drop(sessions);
        Ok(session)
    }
    /// Joins an existing gathering, optionally pulling listed additional
    /// participants over from an old gathering, and returns a snapshot of
    /// the session. On v3.5.0+ the user password is checked first.
    async fn join_matchmake_session_with_param(
        &self,
        join_session_param: JoinMatchmakeSessionParam,
    ) -> Result<MatchmakeSession, ErrorCode> {
        let session = self
            .matchmake_manager
            .get_session(join_session_param.gid)
            .await?;
        let mut session = session.lock().await;
        #[cfg(feature = "v3-5-0")]
        if join_session_param.user_password != session.session.user_password {
            return Err(ErrorCode::RendezVous_InvalidPassword);
        }
        // Drop any stale entry for ourselves (and dead refs) before
        // re-adding, so a rejoin does not duplicate us.
        session
            .connected_players
            .retain(|v| v.upgrade().is_some_and(|v| v.pid != self.pid));
        let mut joining_players = vec![self.this.clone()];
        // NOTE(review): this read lock is never used below — presumably a
        // concurrency guard; confirm it is needed.
        let users = self.matchmake_manager.users.read().await;
        if let Ok(old_gathering) = self
            .matchmake_manager
            .get_session(join_session_param.gid_for_participation_check)
            .await
        {
            let old_gathering = old_gathering.lock().await;
            let players = old_gathering
                .connected_players
                .iter()
                .filter_map(|v| v.upgrade())
                .filter(|u| {
                    join_session_param
                        .additional_participants
                        .iter()
                        .any(|p| *p == u.pid)
                });
            for player in players {
                joining_players.push(Arc::downgrade(&player));
            }
        }
        drop(users);
        session
            .add_players(&joining_players, join_session_param.join_message)
            .await;
        let mm_session = session.session.clone();
        Ok(mm_session)
    }
    /// Auto matchmake: joins the first existing joinable session matching
    /// any of the search criteria; when none matches, creates a fresh
    /// session from the supplied template.
    async fn auto_matchmake_with_param_postpone(
        &self,
        param: AutoMatchmakeParam,
    ) -> Result<MatchmakeSession, ErrorCode> {
        println!("{:?}", param);
        let mut joining_players = vec![self.this.clone()];
        // NOTE(review): this read lock is never used below — presumably a
        // concurrency guard; confirm it is needed.
        let users = self.matchmake_manager.users.read().await;
        if let Ok(old_gathering) = self
            .matchmake_manager
            .get_session(param.gid_for_participation_check)
            .await
        {
            let old_gathering = old_gathering.lock().await;
            let players = old_gathering
                .connected_players
                .iter()
                .filter_map(|v| v.upgrade())
                .filter(|u| param.additional_participants.iter().any(|p| *p == u.pid));
            for player in players {
                joining_players.push(Arc::downgrade(&player));
            }
        }
        drop(users);
        // Scan existing sessions while holding the map's read lock; each
        // candidate is locked individually.
        let sessions = self.matchmake_manager.sessions.read().await;
        for session in sessions.values() {
            let mut session = session.lock().await;
            println!("checking session!");
            if !session.is_joinable() {
                continue;
            }
            // Any one matching criteria set is enough to join.
            let mut bool_matched_criteria = false;
            for criteria in &param.search_criteria {
                if session.matches_criteria(criteria)? {
                    bool_matched_criteria = true;
                }
            }
            if bool_matched_criteria {
                session
                    .add_players(&joining_players, param.join_message)
                    .await;
                return Ok(session.session.clone());
            }
        }
        drop(sessions);
        println!("making new session!");
        // Nothing matched: fall back to creating a new session from the
        // template carried in the same param struct.
        let AutoMatchmakeParam {
            join_message,
            participation_count,
            gid_for_participation_check,
            matchmake_session,
            additional_participants,
            ..
        } = param;
        self.create_matchmake_session_with_param(CreateMatchmakeSessionParam {
            join_message,
            participation_count,
            gid_for_participation_check,
            create_matchmake_session_option: 0,
            matchmake_session,
            additional_participants,
        })
        .await
    }
async fn find_matchmake_session_by_gathering_id_detail(
&self,
gid: u32,
) -> Result<MatchmakeSession, ErrorCode> {
let session = self.matchmake_manager.get_session(gid).await?;
let session = session.lock().await;
Ok(session.session.clone())
}
async fn modify_current_game_attribute(
&self,
gid: u32,
attrib_index: u32,
attrib_val: u32,
) -> Result<(), ErrorCode> {
let session = self.matchmake_manager.get_session(gid).await?;
let mut session = session.lock().await;
session.session.attributes[attrib_index as usize] = attrib_val;
Ok(())
}
    /// Legacy create call: unwraps the `Any`-wrapped session template and
    /// delegates to the param-based implementation, returning the new gid
    /// and session key.
    async fn create_matchmake_session(
        &self,
        gathering: Any,
        message: String,
    ) -> Result<(u32, Vec<u8>), ErrorCode> {
        info!("gathering: {:?}", gathering);
        let Some(Ok(session)): Option<Result<MatchmakeSession, _>> = gathering.try_get() else {
            return Err(ErrorCode::Core_InvalidArgument);
        };
        let session = self
            .create_matchmake_session_with_param(CreateMatchmakeSessionParam {
                matchmake_session: session,
                additional_participants: vec![],
                gid_for_participation_check: 0,
                create_matchmake_session_option: 0,
                join_message: message,
                participation_count: 1,
            })
            .await?;
        Ok((session.gathering.self_gid, session.session_key))
    }
    /// Stubbed: friend-notification queries always report no events.
    async fn get_friend_notification_data(
        &self,
        _ty: i32,
    ) -> Result<Vec<NotificationEvent>, ErrorCode> {
        Ok(vec![])
    }
async fn update_notification_data(
&self,
ty: u32,
param_1: u32,
param_2: u32,
str_param: String,
) -> Result<(), ErrorCode> {
let recpipent = param_2;
let Some(user) = self
.matchmake_manager
.users_by_pid
.read()
.await
.get(&recpipent)
.and_then(|v| v.upgrade())
else {
return Err(ErrorCode::Core_InvalidArgument);
};
println!("notif ty : {}", ty);
match ty {
REQUEST_JOIN_GATHERING => {
user.remote
.process_notification_event(NotificationEvent {
pid_source: self.pid,
notif_type: REQUEST_JOIN_GATHERING * 1000,
param_1,
param_2,
#[cfg(feature = "third-notif-param")]
param_3: 0,
str_param,
})
.await;
}
END_GATHERING => {
user.remote
.process_notification_event(NotificationEvent {
pid_source: self.pid,
notif_type: END_GATHERING * 1000,
param_1,
param_2,
#[cfg(feature = "third-notif-param")]
param_3: 0,
str_param,
})
.await;
}
_ => {
return Err(ErrorCode::Core_InvalidArgument);
}
}
Ok(())
}
async fn join_matchmake_session_ex(
&self,
gid: u32,
message: String,
_dont_care_block_list: bool,
//participation_count: u16,
) -> Result<Vec<u8>, ErrorCode> {
let sess = self.matchmake_manager.get_session(gid).await?;
let mut sess = sess.lock().await;
sess.add_players(&[self.this.clone()], message).await;
Ok(sess.session.session_key.clone())
}
async fn auto_matchmake_with_search_criteria_postpone(
&self,
criteria: Vec<MatchmakeSessionSearchCriteria>,
gathering: Any,
join_message: String,
) -> Result<Any, ErrorCode> {
let session: MatchmakeSession = gathering
.try_get()
.map(|v| v.ok())
.flatten()
.ok_or(ErrorCode::Core_InvalidArgument)?;
println!("{:?}", criteria);
let session = self
.auto_matchmake_with_param_postpone(AutoMatchmakeParam {
matchmake_session: session,
additional_participants: vec![],
gid_for_participation_check: 0,
auto_matchmake_option: 0,
join_message,
participation_count: 0,
search_criteria: criteria,
target_gids: vec![],
})
.await?;
let any = Any::new(&session).map_err(|_| ErrorCode::Core_SystemError)?;
Ok(any)
}
}
impl Matchmake for User {
    /// Looks up gathering `gid` and returns its session wrapped in `Any`.
    async fn find_by_single_id(&self, gid: u32) -> Result<(bool, Any), ErrorCode> {
        let s = self.matchmake_manager.get_session(gid).await?;
        let s = s.lock().await;
        Ok((
            true,
            Any::new(&s.session).map_err(|_| ErrorCode::Custom_Unknown)?,
        ))
    }
    async fn unregister_gathering(&self, _gid: u32) -> Result<bool, ErrorCode> {
        // Nothing to tear down explicitly; just report success.
        Ok(true)
    }
    /// Returns the station URLs of the gathering's current host.
    async fn get_session_urls(&self, gid: u32) -> Result<Vec<StationUrl>, ErrorCode> {
        let session = self.matchmake_manager.get_session(gid).await?;
        let session = session.lock().await;
        // Snapshot the URLs of the first still-connected player whose pid
        // matches the host pid; no live host means the session is gone.
        let urls: Vec<_> = session
            .connected_players
            .iter()
            .filter_map(|v| v.upgrade())
            .filter(|u| u.pid == session.session.gathering.host_pid)
            .map(|u| async move { u.station_url.read().await.clone() })
            .next()
            .ok_or(ErrorCode::RendezVous_SessionClosed)?
            .await;
        println!("{:?}", urls);
        if urls.is_empty() {
            return Err(ErrorCode::RendezVous_NotParticipatedGathering);
        }
        Ok(urls)
    }
    /// Makes the caller the session host (and optionally its owner),
    /// notifying all connected players (110000 = host change, 4000 = owner
    /// change).
    async fn update_session_host(
        &self,
        gid: u32,
        change_session_owner: bool,
    ) -> Result<(), ErrorCode> {
        let session = self.matchmake_manager.get_session(gid).await?;
        let mut session = session.lock().await;
        session.session.gathering.host_pid = self.pid;
        for player in &session.connected_players {
            let Some(player) = player.upgrade() else {
                continue;
            };
            player
                .remote
                .process_notification_event(NotificationEvent {
                    notif_type: 110000,
                    pid_source: self.pid,
                    param_1: gid as PID,
                    param_2: self.pid,
                    #[cfg(feature = "third-notif-param")]
                    param_3: 0,
                    str_param: "".to_string(),
                })
                .await;
        }
        if change_session_owner {
            session.session.gathering.owner_pid = self.pid;
            for player in &session.connected_players {
                let Some(player) = player.upgrade() else {
                    continue;
                };
                player
                    .remote
                    .process_notification_event(NotificationEvent {
                        notif_type: 4000,
                        pid_source: self.pid,
                        param_1: gid as PID,
                        param_2: self.pid,
                        #[cfg(feature = "third-notif-param")]
                        param_3: 0,
                        str_param: "".to_string(),
                    })
                    .await;
            }
        }
        Ok(())
    }
    /// Transfers gathering ownership to the first candidate, notifying all
    /// connected players (4000 = owner change).
    async fn migrate_gathering_ownership(
        &self,
        gid: u32,
        candidates: Vec<PID>,
        _participants_only: bool,
    ) -> Result<(), ErrorCode> {
        let session = self.matchmake_manager.get_session(gid).await?;
        let mut session = session.lock().await;
        // An empty candidate list is a client error.
        let candidate = candidates.first().ok_or(Core_InvalidArgument)?;
        session.session.gathering.owner_pid = *candidate;
        for player in &session.connected_players {
            let Some(player) = player.upgrade() else {
                continue;
            };
            player
                .remote
                .process_notification_event(NotificationEvent {
                    notif_type: 4000,
                    pid_source: self.pid,
                    param_1: gid as PID,
                    param_2: *candidate,
                    #[cfg(feature = "third-notif-param")]
                    param_3: 0,
                    str_param: "".to_string(),
                })
                .await;
        }
        Ok(())
    }
}
impl MatchmakeExt for User {
    /// Leaves gathering `gid`, passing `message` along to the session.
    async fn end_participation(&self, gid: u32, message: String) -> Result<bool, ErrorCode> {
        let session = self.matchmake_manager.get_session(gid).await?;
        session
            .lock()
            .await
            .remove_player_from_session(self.pid, &message)
            .await?;
        Ok(true)
    }
}
impl NatTraversal for User {
    /// Stores the client-reported NAT mapping/filtering classes on every
    /// station URL held for this user, replacing stale values.
    async fn report_nat_properties(
        &self,
        nat_mapping: u32,
        nat_filtering: u32,
        _rtt: u32,
    ) -> Result<(), ErrorCode> {
        let mut urls = self.station_url.write().await;
        for station_url in urls.iter_mut() {
            // Drop any previous NAT options before appending the new ones.
            station_url
                .options
                .retain(|o| !matches!(o, NatMapping(_) | NatFiltering(_)));
            station_url.options.push(NatMapping(nat_mapping as u8));
            station_url.options.push(NatFiltering(nat_filtering as u8));
        }
        Ok(())
    }
    async fn report_nat_traversal_result(
        &self,
        _cid: u32,
        _result: bool,
        _rtt: u32,
    ) -> Result<(), ErrorCode> {
        // The result is intentionally ignored; nothing to record.
        Ok(())
    }
    async fn request_probe_initiation(&self, _station_to_probe: String) -> Result<(), ErrorCode> {
        // This variant is refused outright; clients go through the _ext path.
        info!("NO!");
        Err(RendezVous_AccountExpired)
    }
    /// Asks every user in `target_list` (resolved via the RVCID embedded in
    /// their station URL) to probe `station_to_probe`. Malformed URLs,
    /// missing RVCIDs, unknown ids and disconnected users are skipped.
    async fn request_probe_initialization_ext(
        &self,
        target_list: Vec<String>,
        station_to_probe: String,
    ) -> Result<(), ErrorCode> {
        let users = self.matchmake_manager.users.read().await;
        println!(
            "requesting station probe for {:?} to {:?}",
            target_list, station_to_probe
        );
        for target in target_list {
            let Ok(url) = StationUrl::try_from(target.as_ref()) else {
                continue;
            };
            let Some(RVConnectionID(v)) = url
                .options
                .into_iter()
                .find(|o| matches!(o, &RVConnectionID(_)))
            else {
                continue;
            };
            let Some(v) = users.get(&v) else {
                continue;
            };
            let Some(user) = v.upgrade() else {
                continue;
            };
            user.remote
                .request_probe_initiation(station_to_probe.clone())
                .await;
        }
        info!("finished probing");
        Ok(())
    }
}
/// JSON payload exchanged with the external Splatfest results service
/// (the RNEX_SPLATOON_RESULTS_* endpoints); field names are the wire format.
#[derive(Serialize, Deserialize)]
pub struct CompetitionPostResults {
    pub splatfest_id: u32,
    pub score: u32,
    // Team the submitting player was on.
    pub team_id: u8,
    // Whether that player's team won — consumed as a single bit (`& 1`).
    pub team_win: u8,
    pub user: PID,
}
/// Fetches the per-team vote counts for `fest_id` from the external votes
/// endpoint (`RNEX_SPLATOON_RESULTS_VOTES_GET`).
///
/// The endpoint returns a bare array of integers (e.g. `[3,5]`), parsed by
/// hand here rather than through serde.
fn fetch_team_votes(fest_id: u32) -> Result<Vec<u32>, ErrorCode> {
    let endpoint_votes = env::var("RNEX_SPLATOON_RESULTS_VOTES_GET").map_err(|_| {
        error!("RNEX_SPLATOON_RESULTS_VOTES_GET not set");
        ErrorCode::RendezVous_InvalidConfiguration
    })?;
    let url_votes = format!("{}?splatfest_id={}", endpoint_votes, fest_id);
    let mut response = ureq::get(&url_votes).call().map_err(|e| {
        error!("GET for votes failed: {:?}", e);
        ErrorCode::RendezVous_InvalidConfiguration
    })?;
    let body = response.body_mut().read_to_string().map_err(|e| {
        error!("failed to read votes body: {:?}", e);
        ErrorCode::RendezVous_InvalidConfiguration
    })?;
    let body = body.trim().trim_start_matches('[').trim_end_matches(']');
    // An empty array ("[]") must yield an empty vote list: splitting an empty
    // string on ',' produces one empty segment, which previously failed to
    // parse and turned a perfectly valid response into a config error.
    let votes: Result<Vec<u32>, _> = body
        .split(',')
        .map(str::trim)
        .filter(|s| !s.is_empty())
        .map(u32::from_str)
        .collect();
    votes.map_err(|e| {
        error!("failed to parse votes: {:?}", e);
        ErrorCode::RendezVous_InvalidConfiguration
    })
}
impl Utility for User {
    /// Hands out a random id; uniqueness is probabilistic over the u64 space.
    async fn acquire_nex_unique_id(&self) -> Result<u64, ErrorCode> {
        Ok(rand::random())
    }
}
impl Ranking for User {
    /// Builds the Splatfest ranking response for the first festival id in
    /// `param`, backed by the external results and votes HTTP services.
    async fn competition_ranking_get_param(
        &self,
        param: CompetitionRankingGetParam,
    ) -> Result<Vec<CompetitionRankingScoreInfo>, ErrorCode> {
        // Only the first requested festival is served; 0 if none was given.
        let fest_id = param.festival_ids.get(0).copied().unwrap_or(0);
        let endpoint_results = env::var("RNEX_SPLATOON_RESULTS_GET").map_err(|_| {
            error!("RNEX_SPLATOON_RESULTS_GET not set");
            ErrorCode::RendezVous_InvalidConfiguration
        })?;
        let url_results = format!("{}?splatfest_id={}", endpoint_results, fest_id);
        let response_results = ureq::get(&url_results).call();
        let results: Vec<CompetitionPostResults> = match response_results {
            Ok(mut res) => res.body_mut().read_json().map_err(|e| {
                error!("failed to parse JSON: {:?}", e);
                ErrorCode::RendezVous_InvalidConfiguration
            })?,
            Err(e) => {
                error!("GET failed: {:?}", e);
                return Err(ErrorCode::RendezVous_InvalidConfiguration);
            }
        };
        // Clamp the client-requested window to the available results.
        let offset = param.range.offset as usize;
        let size = param.range.size as usize;
        let start = offset.min(results.len());
        let end = (start + size).min(results.len());
        let team_votes = fetch_team_votes(fest_id)?;
        // Tally wins per team. With team_win as a 0/1 bit,
        // `(team_id ^ !team_win) & 1` picks team_id's own team when that
        // submitter's team won and the opposite team when it lost.
        let mut wins = vec![0u32, 0u32];
        for r in &results {
            let won_team = (r.team_id ^ (!r.team_win)) & 1;
            if let Some(team) = wins.get_mut(won_team as usize) {
                *team += 1
            };
        }
        // Score entries are built only for the requested [start, end) window;
        // the unk* fields mirror captured traffic — TODO confirm semantics.
        let score_data: Vec<CompetitionRankingScoreData> = results[start..end]
            .iter()
            .map(|r| CompetitionRankingScoreData {
                unk: 1,
                pid: r.user,
                score: r.score,
                modified: KerberosDateTime::now(),
                unk2: 1,
                appdata: QBuffer(vec![]),
            })
            .collect();
        let info = CompetitionRankingScoreInfo {
            fest_id,
            score_data,
            unk: 0,
            team_wins: wins,
            team_votes,
        };
        println!("range: {:?}", param.range);
        Ok(vec![info])
    }
    /// Uploads one player's Splatfest result as JSON to the external results
    /// service. Best-effort: returns `false` only when the endpoint env var
    /// is unset or the payload cannot be serialized; HTTP failures are
    /// logged but still report `true` to the client.
    async fn upload_competition_ranking_score(
        &self,
        param: UploadCompetitionData,
    ) -> Result<bool, ErrorCode> {
        info!("fest results for user {:?}:", self.pid);
        info!("fest id: {:?}", param.splatfest_id);
        info!("score: {:?}", param.score);
        info!("team id: {:?}", param.team_id);
        info!("did current team win: {:?}", param.team_win);
        let endpoint = match env::var("RNEX_SPLATOON_RESULTS_POST") {
            Ok(url) => url,
            Err(_) => {
                error!("RNEX_SPLATOON_RESULTS_POST not set");
                return Ok(false);
            }
        };
        let payload = CompetitionPostResults {
            splatfest_id: param.splatfest_id,
            score: param.score,
            team_id: param.team_id,
            team_win: param.team_win,
            user: self.pid,
        };
        let json_body = match serde_json::to_string(&payload) {
            Ok(j) => j,
            Err(e) => {
                error!("error making json_body: {:?}", e);
                return Ok(false);
            }
        };
        let response = ureq::post(&endpoint)
            .header("Content-Type", "application/json")
            .send(json_body);
        match response {
            Ok(res) => {
                info!("POST worked: {}", res.status());
            }
            Err(e) => {
                error!("POST borked: {:?}", e);
            }
        }
        Ok(true)
    }
}

View file

@ -1,20 +0,0 @@
use std::sync::LazyLock;
use rc4::{Key, StreamCipher};
use typenum::U5;
/// A matched pair of stream-cipher states, one per traffic direction.
pub struct EncryptionPair<T: StreamCipher + Send> {
    // Cipher state used for outgoing data.
    pub send: T,
    // Cipher state used for incoming data.
    pub recv: T,
}
impl<T: StreamCipher + Send> EncryptionPair<T> {
    /// Builds a pair by invoking `func` once per direction, so both halves
    /// start from identical cipher states.
    pub fn init_both<F: Fn() -> T>(func: F) -> Self {
        let recv = func();
        let send = func();
        Self { send, recv }
    }
}
pub static DEFAULT_KEY: LazyLock<Key<U5>> = LazyLock::new(|| Key::from(*b"CD&ML"));

View file

@ -1,6 +0,0 @@
pub mod encryption;
pub mod socket_addr;
pub mod station_url;
pub mod ticket;
pub mod types_flags;
pub mod virtual_port;

View file

@ -1,41 +0,0 @@
use hmac::Hmac;
use macros::RmcSerialize;
use md5::digest::Mac;
use rnex_core::prudp::virtual_port::VirtualPort;
use std::io::Write;
use std::net::{IpAddr, SocketAddr};
type Md5Hmac = Hmac<md5::Md5>;
/// A PRUDP endpoint: the underlying UDP socket address plus the virtual
/// port multiplexed on top of it.
#[derive(Eq, PartialEq, Hash, Debug, Copy, Clone, Ord, PartialOrd, RmcSerialize)]
#[rmc_struct(0)]
pub struct PRUDPSockAddr {
    pub regular_socket_addr: SocketAddr,
    pub virtual_port: VirtualPort,
}
impl PRUDPSockAddr {
    /// Bundles a UDP socket address with the PRUDP virtual port it targets.
    pub fn new(regular_socket_addr: SocketAddr, virtual_port: VirtualPort) -> Self {
        Self {
            regular_socket_addr,
            virtual_port,
        }
    }
    /// Derives the 16-byte PRUDP connection signature for this address:
    /// HMAC-MD5 with an all-zero key over the raw IP octets.
    ///
    /// NOTE(review): the port is deliberately excluded (see the commented-out
    /// line below) — confirm this matches the protocol's expectation.
    pub fn calculate_connection_signature(&self) -> [u8; 16] {
        let mut hmac = Md5Hmac::new_from_slice(&[0; 16]).expect("HMAC accepts any key length");
        // Feed the octets through the infallible Mac API directly instead of
        // the io::Write adapter (which forced an expect on a Result that
        // cannot actually fail here).
        match self.regular_socket_addr.ip() {
            IpAddr::V4(v) => hmac.update(&v.octets()),
            IpAddr::V6(v) => hmac.update(&v.octets()),
        }
        //hmac.update(&self.regular_socket_addr.port().to_be_bytes());
        let result: [u8; 16] = hmac.finalize().into_bytes()[0..16]
            .try_into()
            .expect("MD5 output is exactly 16 bytes");
        result
    }
}

View file

@ -1,173 +0,0 @@
use crate::prudp::station_url::Type::{PRUDP, PRUDPS, UDP};
use crate::prudp::station_url::UrlOptions::{
Address, ConnectionID, NatFiltering, NatMapping, NatType, PID, PMP, Platform, Port,
PrincipalID, RVConnectionID, StreamID, StreamType, UPNP,
};
use crate::rmc::structures::Error::StationUrlInvalid;
use crate::rmc::structures::RmcSerialize;
use crate::rmc::structures::helpers::DummyFormatWriter;
use log::error;
use std::fmt::{Debug, Display, Formatter, Write};
use std::io::Read;
use std::net::IpAddr;
/// Transport scheme of a station URL.
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum Type {
    UDP,
    PRUDP,
    PRUDPS,
}
/// Values carried by the `type=` (NAT type) station-URL option.
pub mod nat_types {
    pub const BEHIND_NAT: u8 = 1;
    pub const PUBLIC: u8 = 2;
}
/// One `key=value` option of a station URL; see `StationUrl::read_options`
/// for the textual key each variant maps to.
#[derive(Clone, Eq, PartialEq)]
pub enum UrlOptions {
    Address(IpAddr),
    Port(u16),
    StreamType(u8),
    StreamID(u8),
    ConnectionID(u8),
    PrincipalID(rnex_core::PID),
    NatType(u8),
    NatMapping(u8),
    NatFiltering(u8),
    UPNP(u8),
    RVConnectionID(u32),
    Platform(u8),
    PMP(u8),
    PID(u32),
}
/// A parsed NEX station URL: a transport scheme plus its option list,
/// in the order they appeared in the source string.
#[derive(Clone, PartialEq, Eq)]
pub struct StationUrl {
    pub url_type: Type,
    pub options: Vec<UrlOptions>,
}
impl StationUrl {
pub fn read_options(options: &str) -> Option<Vec<UrlOptions>> {
let mut options_out = Vec::new();
for option in options.split(';') {
if option == "" {
continue;
}
let mut option_parts = option.split('=');
let option_name = option_parts.next()?.to_ascii_lowercase();
let option_value = option_parts.next()?;
match option_name.as_ref() {
"address" => options_out.push(Address(option_value.parse().ok()?)),
"port" => options_out.push(Port(option_value.parse().ok()?)),
"natf" => options_out.push(NatFiltering(option_value.parse().ok()?)),
"natm" => options_out.push(NatMapping(option_value.parse().ok()?)),
"sid" => options_out.push(StreamID(option_value.parse().ok()?)),
"upnp" => options_out.push(UPNP(option_value.parse().ok()?)),
"type" => options_out.push(NatType(option_value.parse().ok()?)),
"stream" => options_out.push(StreamType(option_value.parse().ok()?)),
"RVCID" => options_out.push(RVConnectionID(option_value.parse().ok()?)),
"rvcid" => options_out.push(RVConnectionID(option_value.parse().ok()?)),
"pl" => options_out.push(Platform(option_value.parse().ok()?)),
"pmp" => options_out.push(PMP(option_value.parse().ok()?)),
"pid" => options_out.push(PID(option_value.parse().ok()?)),
"PID" => options_out.push(PID(option_value.parse().ok()?)),
_ => {
error!("unimplemented option type, skipping: {}", option_name);
}
}
}
Some(options_out)
}
}
impl TryFrom<&str> for StationUrl {
    type Error = ();
    /// Parses `"scheme:/key=value;…"`; any malformation yields `Err(())`.
    fn try_from(value: &str) -> Result<Self, ()> {
        // `split_once` removes the ":/" delimiter for us, replacing the old
        // find + split_at + manual `[2..]` slicing.
        let (url_type, options) = value.split_once(":/").ok_or(())?;
        let url_type = match url_type {
            "udp" => UDP,
            "prudp" => PRUDP,
            "prudps" => PRUDPS,
            _ => return Err(()),
        };
        let options = Self::read_options(options).ok_or(())?;
        Ok(Self { url_type, options })
    }
}
impl Display for StationUrl {
    /// Renders as `scheme:/key=value;key=value;` — every option, including
    /// the last, is followed by ';'; the `Into<String>` impl strips that
    /// trailing separator for the canonical form.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        let url_type_str = match self.url_type {
            UDP => "udp:/",
            PRUDP => "prudp:/",
            PRUDPS => "prudps:/",
        };
        write!(f, "{}", url_type_str)?;
        for option in &self.options {
            match option {
                Address(v) => write!(f, "address={}", v)?,
                Port(v) => write!(f, "port={}", v)?,
                StreamType(v) => write!(f, "stream={}", v)?,
                StreamID(v) => write!(f, "sid={}", v)?,
                ConnectionID(v) => write!(f, "CID={}", v)?,
                // NOTE(review): PrincipalID and PID both serialize as "PID=",
                // and parsing maps "pid" back to `PID` only — round-trips
                // lose the PrincipalID distinction. Confirm this is intended.
                PrincipalID(v) => write!(f, "PID={}", v)?,
                NatType(v) => write!(f, "type={}", v)?,
                NatMapping(v) => write!(f, "natm={}", v)?,
                NatFiltering(v) => write!(f, "natf={}", v)?,
                UPNP(v) => write!(f, "upnp={}", v)?,
                RVConnectionID(v) => write!(f, "RVCID={}", v)?,
                Platform(v) => write!(f, "pl={}", v)?,
                PMP(v) => write!(f, "pmp={}", v)?,
                PID(v) => write!(f, "PID={}", v)?,
            }
            write!(f, ";")?;
        }
        Ok(())
    }
}
impl<'a> Into<String> for &'a StationUrl {
fn into(self) -> String {
let url = self.to_string();
url[0..url.len() - 1].into()
}
}
impl RmcSerialize for StationUrl {
    // Deserializes from an RMC string; parse failures map to StationUrlInvalid.
    fn deserialize(reader: &mut impl Read) -> crate::rmc::structures::Result<Self> {
        let str = String::deserialize(reader)?;
        Self::try_from(str.as_str()).map_err(|_| StationUrlInvalid)
    }
    // Serializes the canonical (trailing-';'-stripped) string form.
    fn serialize(&self, writer: &mut impl std::io::Write) -> crate::rmc::structures::Result<()> {
        let str: String = self.into();
        str.serialize(writer)
    }
    // NOTE(review): this measures the raw Display output, which still carries
    // the trailing ';' that `serialize` strips — confirm serialize_str_len
    // accounts for that one-byte discrepancy.
    fn serialize_write_size(&self) -> crate::rmc::structures::Result<u32> {
        let mut dummy = DummyFormatWriter::new();
        write!(&mut dummy, "{}", self)?;
        Ok(dummy.serialize_str_len())
    }
}
impl Debug for StationUrl {
    // Debug mirrors the canonical string form (no trailing ';').
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        let rendered: String = self.into();
        f.write_str(&rendered)
    }
}

View file

@ -1,79 +0,0 @@
use std::io::Cursor;
use log::{error, info};
use rc4::{KeyInit, Rc4, Rc4Core, StreamCipher, cipher::StreamCipherCoreWrapper};
use typenum::U16;
use v_byte_helpers::{IS_BIG_ENDIAN, ReadExtensions};
use crate::{
kerberos::{SESSION_KEY_LENGTH, SessionLengthTy, TicketInternalData, derive_key},
nex::account::Account,
rmc::structures::RmcSerialize,
};
use rnex_core::PID;
/// Validates a secure-connection request blob and extracts the Kerberos
/// session key, the client's PID and the "response check" value.
///
/// Layout: two length-prefixed buffers — the encrypted ticket and the
/// encrypted request data. Both carry 0x10 trailing bytes that are skipped
/// before decryption (presumably an HMAC — see the todos below; confirm).
/// Returns `None` on any parse, decrypt or verification failure.
pub fn read_secure_connection_data(
    data: &[u8],
    act: &Account,
) -> Option<([u8; SESSION_KEY_LENGTH], PID, u32)> {
    let mut cursor = Cursor::new(data);
    let mut ticket_data: Vec<u8> = Vec::deserialize(&mut cursor).ok()?;
    let mut request_data: Vec<u8> = Vec::deserialize(&mut cursor).ok()?;
    info!("done request data {}", SESSION_KEY_LENGTH);
    // checked_sub: a buffer shorter than 0x10 bytes is malformed client
    // input and must yield None, not a slicing panic.
    let ticket_body_len = ticket_data.len().checked_sub(0x10)?;
    let ticket_data = &mut ticket_data[0..ticket_body_len];
    // The ticket is RC4-encrypted with a key derived from this account's
    // kerberos password.
    let server_key = derive_key(act.pid, &act.kerbros_password[..]);
    let mut rc4: StreamCipherCoreWrapper<Rc4Core<U16>> =
        Rc4::new_from_slice(&server_key).expect("unable to init rc4 keystream");
    rc4.apply_keystream(ticket_data);
    let ticket_data: &TicketInternalData = match bytemuck::try_from_bytes(ticket_data) {
        Ok(v) => v,
        Err(e) => {
            error!("unable to read internal ticket data: {}", e);
            return None;
        }
    };
    // todo: add ticket expiration
    let TicketInternalData {
        session_key,
        pid: ticket_source_pid,
        issued_time,
    } = *ticket_data;
    // todo: add checking if tickets are signed with a valid md5-hmac
    // The request data is RC4-encrypted with the session key from the ticket.
    let request_body_len = request_data.len().checked_sub(0x10)?;
    let request_data = &mut request_data[0..request_body_len];
    let mut rc4: StreamCipherCoreWrapper<Rc4Core<SessionLengthTy>> =
        Rc4::new_from_slice(&session_key).expect("unable to init rc4 keystream");
    rc4.apply_keystream(request_data);
    let mut reqest_data_cursor = Cursor::new(request_data);
    let pid: PID = reqest_data_cursor.read_struct(IS_BIG_ENDIAN).ok()?;
    // The PID in the request must match the one baked into the ticket,
    // otherwise the caller is impersonating another principal.
    if pid != ticket_source_pid {
        let ticket_created_on = issued_time.to_regular_time();
        error!(
            "someone tried to spoof their pid, ticket was created on: {}",
            ticket_created_on.to_rfc2822()
        );
        return None;
    }
    let _cid: u32 = reqest_data_cursor.read_struct(IS_BIG_ENDIAN).ok()?;
    let response_check: u32 = reqest_data_cursor.read_struct(IS_BIG_ENDIAN).ok()?;
    Some((session_key, pid, response_check))
}

View file

@ -1,64 +0,0 @@
use std::fmt::{Debug, Formatter};
use bytemuck::{Pod, Zeroable};
use v_byte_helpers::SwapEndian;
/// Packed PRUDP header field: low 4 bits = packet type, high 12 bits = flags
/// (constants live in the `types` and `flags` modules in this file).
#[repr(transparent)]
#[derive(Copy, Clone, Pod, Zeroable, SwapEndian, Default, Eq, PartialEq)]
pub struct TypesFlags(pub u16);
impl TypesFlags {
    /// Extracts the 4-bit packet type.
    #[inline]
    pub const fn get_types(self) -> u8 {
        (self.0 & 0x000F) as u8
    }
    /// Extracts the 12-bit flag field.
    #[inline]
    pub const fn get_flags(self) -> u16 {
        (self.0 & 0xFFF0) >> 4
    }
    /// Returns a copy with the type nibble replaced by `val` (truncated to 4 bits).
    #[inline]
    pub const fn types(self, val: u8) -> Self {
        Self((self.0 & 0xFFF0) | (val as u16 & 0x000F))
    }
    /// Returns a copy with the flag field replaced by `val` (truncated to 12 bits).
    #[inline]
    pub const fn flags(self, val: u16) -> Self {
        Self((self.0 & 0x000F) | ((val << 4) & 0xFFF0))
    }
    /// ORs `val` into the flag field — existing flag bits are NOT cleared.
    #[inline]
    pub const fn set_flag(&mut self, val: u16) {
        self.0 |= (val & 0xFFF) << 4;
    }
    /// ORs `val` into the type nibble — existing type bits are NOT cleared.
    /// NOTE(review): OR-only semantics differ from the replace-style `types`/
    /// `flags` builders above; confirm callers always start from zero.
    #[inline]
    pub const fn set_types(&mut self, val: u8) {
        self.0 |= val as u16 & 0x0F;
    }
}
impl Debug for TypesFlags {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Decode both packed fields; locals named for what they actually hold
        // (the old names were copy-pasted from VirtualPort's Debug impl).
        let types = self.get_types();
        let flags = self.get_flags();
        write!(f, "TypesFlags{{ types: {}, flags: {} }}", types, flags)
    }
}
/// PRUDP packet-header flag bits (stored in the 12-bit flag field).
pub mod flags {
    pub const ACK: u16 = 0x001;
    pub const RELIABLE: u16 = 0x002;
    pub const NEED_ACK: u16 = 0x004;
    pub const HAS_SIZE: u16 = 0x008;
    pub const MULTI_ACK: u16 = 0x200;
}
/// PRUDP packet types (stored in the 4-bit type field).
pub mod types {
    pub const SYN: u8 = 0x0;
    pub const CONNECT: u8 = 0x1;
    pub const DATA: u8 = 0x2;
    pub const DISCONNECT: u8 = 0x3;
    pub const PING: u8 = 0x4;
    /// Semantics unclear ("user" packets) — not referenced by visible code.
    pub const USER: u8 = 0x5;
}

View file

@ -1,57 +0,0 @@
use bytemuck::{Pod, Zeroable};
use std::fmt::{Debug, Formatter};
use v_byte_helpers::SwapEndian;
/// Packed PRUDP virtual port: high nibble = stream type, low nibble = port number.
#[repr(transparent)]
#[derive(PartialEq, Eq, Ord, PartialOrd, Copy, Clone, Pod, Zeroable, SwapEndian, Hash, Default)]
pub struct VirtualPort(pub u8);
impl VirtualPort {
    /// Extracts the stream type from the high nibble.
    #[inline]
    pub const fn get_stream_type(self) -> u8 {
        (self.0 & 0xF0) >> 4
    }
    /// Extracts the port number from the low nibble.
    #[inline]
    pub const fn get_port_number(self) -> u8 {
        self.0 & 0x0F
    }
    /// Returns a copy with the stream type set to `val`.
    ///
    /// # Panics
    /// Panics if `val` does not fit in a nibble (> 0x0F).
    #[inline]
    pub fn stream_type(self, val: u8) -> Self {
        let masked_val = val & 0x0F;
        assert_eq!(masked_val, val);
        Self((self.0 & 0x0F) | (masked_val << 4))
    }
    /// Returns a copy with the port number set to `val`.
    ///
    /// # Panics
    /// Panics if `val` does not fit in a nibble (> 0x0F).
    #[inline]
    pub fn port_number(self, val: u8) -> Self {
        let masked_val = val & 0x0F;
        assert_eq!(masked_val, val);
        Self((self.0 & 0xF0) | masked_val)
    }
    /// Packs `port` and `stream_type` into one byte; panics on out-of-range values.
    #[inline]
    pub fn new(port: u8, stream_type: u8) -> Self {
        Self(0).stream_type(stream_type).port_number(port)
    }
    /// Parses `"port:stream_type"`, returning `None` on malformed input.
    ///
    /// Values that don't fit in a nibble are rejected here instead of being
    /// forwarded into the asserting `new` — previously e.g. "200:1" parsed
    /// the u8 fine and then panicked inside `port_number`.
    #[inline(always)]
    pub fn parse(data: &str) -> Option<Self> {
        let (p1, p2) = data.split_once(':')?;
        let port: u8 = p1.parse().ok()?;
        let stream_type: u8 = p2.parse().ok()?;
        if port > 0x0F || stream_type > 0x0F {
            return None;
        }
        Some(Self::new(port, stream_type))
    }
}
impl Debug for VirtualPort {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Render the two decoded nibbles inline.
        write!(
            f,
            "VirtualPort{{ stream_type: {}, port_number: {} }}",
            self.get_stream_type(),
            self.get_port_number()
        )
    }
}

View file

@ -1,60 +0,0 @@
use crate::define_rmc_proto;
use crate::rmc::structures::RmcSerialize;
use macros::{RmcSerialize, method_id, rmc_proto};
use rnex_core::rmc::response::ErrorCode;
use std::io;
use std::net::SocketAddrV4;
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
/// Extension trait: read a length-prefixed (u32 little-endian) buffer from
/// any async reader. Blanket-implemented for all `AsyncRead + Unpin`.
pub trait UnitPacketRead: AsyncRead + Unpin {
    // Reads the 4-byte LE length, then exactly that many payload bytes.
    // NOTE(review): `len` comes straight off the wire and sizes the
    // allocation unchecked — a hostile peer can request a ~4 GiB buffer.
    // Consider capping it; confirm whether peers on this path are trusted.
    async fn read_buffer(&mut self) -> Result<Vec<u8>, io::Error> {
        let mut len_raw: [u8; 4] = [0; 4];
        self.read_exact(&mut len_raw).await?;
        let len = u32::from_le_bytes(len_raw);
        let mut vec = vec![0u8; len as _];
        self.read_exact(&mut vec).await?;
        Ok(vec)
    }
}
impl<T: AsyncRead + Unpin> UnitPacketRead for T {}
/// Extension trait: write a buffer in the framing `read_buffer` expects.
/// Blanket-implemented for all `AsyncWrite + Unpin`.
pub trait UnitPacketWrite: AsyncWrite + Unpin {
    // Serializes `data` (RmcSerialize for byte slices emits the length
    // prefix — TODO confirm) into a scratch Vec, then writes and flushes.
    async fn send_buffer(&mut self, data: &[u8]) -> Result<(), io::Error> {
        let mut dest_data = Vec::new();
        data.serialize(&mut dest_data)
            .expect("ran out of memory or something");
        self.write_all(&dest_data[..]).await?;
        self.flush().await?;
        Ok(())
    }
}
impl<T: AsyncWrite + Unpin> UnitPacketWrite for T {}
#[rmc_proto(1)]
pub trait EdgeNodeManagement {
#[method_id(1)]
async fn get_url(&self, seed: u64) -> Result<SocketAddrV4, ErrorCode>;
}
define_rmc_proto!(
proto EdgeNodeHolder{
EdgeNodeManagement
}
);
#[derive(RmcSerialize, Debug)]
#[repr(u32)]
pub enum EdgeNodeHolderConnectOption {
DontRegister = 0,
Register(SocketAddrV4) = 1,
}

View file

@ -1,30 +0,0 @@
use macros::{RmcSerialize, method_id, rmc_proto};
use rnex_core::{
PID,
rmc::{response::ErrorCode, structures::any::Any},
};
use crate::{kerberos::KerberosDateTime, rmc::protocols::friends::NNAInfo};
#[derive(RmcSerialize, Debug, Clone)]
#[rmc_struct(0)]
pub struct NintendoCreateAccountData {
pub nna_info: NNAInfo,
pub nex_token: String,
pub birthday: KerberosDateTime,
pub unk: u64,
}
#[rmc_proto(25)]
pub trait AccountManagement {
#[method_id(27)]
async fn nintendo_create_account(
&self,
principal_name: String,
key: String,
groups: u32,
email: String,
auth_data: Any,
) -> Result<(PID, String), ErrorCode>;
}

View file

@ -1,266 +0,0 @@
use macros::{RmcSerialize, method_id, rmc_proto};
use rnex_core::PID;
use rnex_core::kerberos::KerberosDateTime;
use rnex_core::rmc::response::ErrorCode;
use rnex_core::rmc::structures::qbuffer::QBuffer;
use rnex_core::rmc::structures::qresult::QResult;
use rnex_core::rmc::structures::resultsrange::ResultsRange;
#[derive(RmcSerialize, Clone, Debug, Default)]
#[rmc_struct(0)]
pub struct PersistenceTarget {
pub owner: PID,
pub persistence_slot_id: u16,
}
#[derive(RmcSerialize, Clone, Debug, Default)]
#[rmc_struct(0)]
pub struct Permission {
pub permission: u8,
pub recipient_ids: Vec<PID>,
}
#[derive(RmcSerialize, Clone)]
#[rmc_struct(0)]
pub struct RatingInfoWithSlot {
pub slot: i8,
pub rating: RatingInfo,
}
#[derive(RmcSerialize, Clone)]
#[rmc_struct(0)]
pub struct RatingInfo {
pub total_value: i64,
pub count: u32,
pub initial_value: i64,
}
#[derive(RmcSerialize, Clone, Default)]
#[rmc_struct(0)]
pub struct GetMetaParam {
pub dataid: u64,
pub persistence_target: PersistenceTarget,
pub result_option: u8,
pub access_password: u64,
}
#[derive(RmcSerialize, Clone, Default)]
#[rmc_struct(0)]
pub struct GetMetaInfo {
pub dataid: u64,
pub owner: PID,
pub size: u32,
pub name: String,
pub data_type: u16,
pub meta_binary: QBuffer,
pub permission: Permission,
pub del_permission: Permission,
pub created_time: KerberosDateTime,
pub updated_time: KerberosDateTime,
pub period: u16,
pub status: u8,
pub referred_count: u32,
pub refer_dat_id: u32,
pub flag: u32,
pub referred_time: KerberosDateTime,
pub expire_time: KerberosDateTime,
pub tags: Vec<String>,
pub ratings: Vec<RatingInfoWithSlot>,
}
#[derive(RmcSerialize, Clone)]
#[rmc_struct(0)]
pub struct RatingInitParam {
pub flag: u8,
pub internal_flag: u8,
pub lock_type: u8,
pub intial_valie: i64,
pub range_min: i32,
pub range_max: i32,
pub period_hour: i8,
pub period_duration: i16,
}
#[derive(RmcSerialize, Clone)]
#[rmc_struct(0)]
pub struct RatingInitParamWithSlot {
pub slot: i8,
pub param: RatingInitParam,
}
#[derive(RmcSerialize, Clone)]
#[rmc_struct(0)]
pub struct PersistenceInitParam {
pub persistence_slot_id: u16,
pub delete_last_object: bool,
}
#[derive(RmcSerialize, Clone)]
#[rmc_struct(0)]
pub struct KeyValue {
pub key: String,
pub value: String,
}
#[derive(RmcSerialize, Clone)]
#[rmc_struct(0)]
pub struct PreparePostParam {
pub size: u32,
pub name: String,
pub data_type: u16,
pub meta_binary: QBuffer,
pub permission: Permission,
pub del_permission: Permission,
pub flag: u32,
pub period: u16,
pub refer_data_id: u32,
pub tags: Vec<String>,
pub rating_init_params: Vec<RatingInitParamWithSlot>,
pub persistence_init_param: PersistenceInitParam,
pub extra_data: Vec<String>,
}
#[derive(RmcSerialize, Clone)]
#[rmc_struct(0)]
pub struct ReqPostInfo {
pub dataid: u64,
pub url: String,
pub request_headers: Vec<KeyValue>,
pub form_fields: Vec<KeyValue>,
pub root_ca_cert: Vec<u8>,
}
#[derive(RmcSerialize, Clone)]
#[rmc_struct(0)]
pub struct CompletePostParam {
pub dataid: u64,
pub success: bool,
}
#[derive(RmcSerialize, Clone)]
#[rmc_struct(0)]
pub struct RateCustomRankingParam {
pub dataid: u64,
pub appid: u32,
pub score: u32,
pub period: u16,
}
#[derive(RmcSerialize, Clone)]
#[rmc_struct(0)]
pub struct BufferQueueParam {
pub dataid: u64,
pub slot: u32,
}
// I just realized I forgot to add "DataStore" in front of the structs. I can't be assed to change it, sucks to be you lol.
#[derive(RmcSerialize, Clone)]
#[rmc_struct(0)]
pub struct DataStoreGetCustomRankingByDataIDParam {
pub application_id: u32,
pub data_id_list: Vec<u64>,
pub result_option: u8,
}
#[derive(RmcSerialize, Clone)]
#[rmc_struct(0)]
pub struct DataStoreCustomRankingResult {
pub order: u32,
pub score: u32,
pub meta_info: GetMetaInfo,
}
#[derive(RmcSerialize, Clone)]
#[rmc_struct(0)]
pub struct DataStorePrepareGetParam {
pub dataid: u64,
pub lockid: u32,
pub persistence_target: PersistenceTarget,
pub access_password: u64,
pub extra_data: Vec<String>,
}
#[derive(RmcSerialize, Clone)]
#[rmc_struct(0)]
pub struct DataStoreReqGetInfo {
pub url: String,
pub request_headers: Vec<KeyValue>,
pub size: u32,
pub root_ca_cert: Vec<u8>,
pub dataid: u64,
}
#[derive(RmcSerialize, Clone)]
#[rmc_struct(1)]
pub struct DataStoreSearchParam {
pub search_target: u8,
pub owner_ids: Vec<PID>,
pub owner_type: u8,
pub destination_ids: Vec<u64>,
pub data_type: u16,
pub created_after: KerberosDateTime,
pub created_before: KerberosDateTime,
pub updated_after: KerberosDateTime,
pub updated_before: KerberosDateTime,
pub refer_dat_id: u32,
pub tags: Vec<String>,
pub result_order_column: u8,
pub result_order: u8,
pub result_range: ResultsRange,
pub result_option: u8,
pub minimal_rating_frequency: u32,
pub use_cache: bool,
}
#[rmc_proto(115)]
pub trait DataStore {
#[method_id(8)]
async fn get_meta(&self, metaparam: GetMetaParam) -> Result<GetMetaInfo, ErrorCode>;
#[method_id(36)]
async fn get_metas_multiple_param(
&self,
params: Vec<GetMetaParam>,
) -> Result<(Vec<GetMetaInfo>, Vec<QResult>), ErrorCode>;
#[method_id(24)]
async fn prepare_post_object(
&self,
postparam: PreparePostParam,
) -> Result<ReqPostInfo, ErrorCode>;
#[method_id(26)]
async fn complete_post_object(&self, completeparam: CompletePostParam)
-> Result<(), ErrorCode>;
#[method_id(48)]
async fn rate_custom_ranking(
&self,
rankingparam: Vec<RateCustomRankingParam>,
) -> Result<(), ErrorCode>;
#[method_id(61)]
async fn get_application_config(&self, appid: u32) -> Result<Vec<i32>, ErrorCode>;
#[method_id(50)]
async fn get_custom_ranking_by_data_id(
&self,
custom_ranking_param: DataStoreGetCustomRankingByDataIDParam,
) -> Result<(Vec<DataStoreCustomRankingResult>, Vec<QResult>), ErrorCode>;
#[method_id(54)]
async fn get_buffer_queue(
&self,
bufferparam: BufferQueueParam,
) -> Result<Vec<QBuffer>, ErrorCode>;
#[method_id(25)]
async fn prepare_get_object(
&self,
prepare_get_param: DataStorePrepareGetParam,
) -> Result<DataStoreReqGetInfo, ErrorCode>;
#[method_id(65)]
async fn followings_latest_course_search_object(
&self,
course_search_param: DataStoreSearchParam,
extra_data: Vec<String>,
) -> Result<Vec<DataStoreCustomRankingResult>, ErrorCode>;
#[method_id(74)]
async fn get_application_config_string(
&self,
application_id: u32,
) -> Result<Vec<String>, ErrorCode>;
}

View file

@ -1,183 +0,0 @@
use macros::{RmcSerialize, method_id, rmc_proto};
use rnex_core::{kerberos::KerberosDateTime, rmc::response::ErrorCode};
use rnex_core::rmc::structures::data::Data;
#[derive(RmcSerialize, Debug, Clone)]
#[rmc_struct(0)]
pub struct MiiV2 {
#[extends]
pub data: Data,
pub name: String,
pub unk: u8,
pub unk2: u8,
pub mii_data: Vec<u8>,
pub date_time: KerberosDateTime,
}
#[derive(RmcSerialize, Debug, Clone)]
#[rmc_struct(0)]
pub struct PrincipalBasicInfo {
#[extends]
pub data: Data,
pub pid: u32,
pub nnid: String,
pub mii: MiiV2,
pub unk: u8,
}
#[derive(RmcSerialize, Debug, Clone)]
#[rmc_struct(0)]
pub struct NNAInfo {
#[extends]
pub data: Data,
pub principal_basic_info: PrincipalBasicInfo,
pub unk: u8,
pub unk2: u8,
}
#[derive(RmcSerialize, Clone, Copy, Debug)]
#[rmc_struct(0)]
pub struct GameKey {
#[extends]
pub data: Data,
pub tid: u64,
pub version: u16,
}
#[derive(RmcSerialize, Clone, Debug)]
#[rmc_struct(0)]
pub struct NintendoPresenceV2 {
#[extends]
pub data: Data,
pub changed_flags: u32,
pub is_online: bool,
pub game_key: GameKey,
pub unk: u8,
pub message: String,
pub unk2: u32,
pub unk3: u8,
pub game_server_id: u32,
pub unk4: u32,
pub pid: u32,
pub gid: u32,
pub app_data: Vec<u8>,
pub unk5: u8,
pub unk6: u8,
pub unk7: u8,
}
#[derive(RmcSerialize, Clone, Debug)]
#[rmc_struct(0)]
pub struct PrincipalPreference {
#[extends]
pub data: Data,
pub show_online: bool,
pub show_playing_title: bool,
pub block_friend_request: bool,
}
#[derive(RmcSerialize)]
#[rmc_struct(0)]
pub struct Comment {
#[extends]
pub data: Data,
pub unk: u8,
pub message: String,
pub last_changed: KerberosDateTime,
}
#[derive(RmcSerialize)]
#[rmc_struct(0)]
pub struct FriendInfo {
#[extends]
pub data: Data,
pub nna_info: NNAInfo,
pub presence: NintendoPresenceV2,
pub comment: Comment,
pub became_friends: KerberosDateTime,
pub last_online: KerberosDateTime,
pub unk: u64,
}
#[derive(RmcSerialize)]
#[rmc_struct(0)]
pub struct FriendRequestMessage {
#[extends]
pub data: Data,
pub friend_request_id: u64,
pub is_recieved: u8,
pub unk: u8,
pub message: String,
pub unk2: u8,
pub unk3: String,
pub game_key: GameKey,
pub unk4: KerberosDateTime,
pub expires_on: KerberosDateTime,
}
#[derive(RmcSerialize)]
#[rmc_struct(0)]
pub struct FriendRequest {
#[extends]
pub data: Data,
pub basic_info: PrincipalBasicInfo,
pub request_message: FriendRequestMessage,
pub sent_on: KerberosDateTime,
}
#[derive(RmcSerialize)]
#[rmc_struct(0)]
pub struct BlacklistedPrincipal {
#[extends]
pub data: Data,
pub basic_info: PrincipalBasicInfo,
pub game_key: GameKey,
pub since: KerberosDateTime,
}
#[derive(RmcSerialize)]
#[rmc_struct(0)]
pub struct PersistentNotification {
#[extends]
pub data: Data,
pub unk1: u64,
pub unk2: u32,
pub unk3: u32,
pub unk4: u32,
pub unk5: String,
}
/// Friends service (RMC protocol 102). Method ids are the on-wire RMC method
/// numbers declared via `#[method_id]`.
#[rmc_proto(102)]
pub trait Friends {
    /// Pushes the caller's account info and presence, and returns the complete
    /// friends-state snapshot: preference, own comment, friend list, received
    /// and sent friend requests, blacklist, persistent notifications, and two
    /// boolean flags whose meaning is not established from this code.
    #[method_id(1)]
    async fn update_and_get_all_information(
        &self,
        info: NNAInfo,
        presence: NintendoPresenceV2,
        date_time: KerberosDateTime,
    ) -> Result<
        (
            PrincipalPreference,
            Comment,
            Vec<FriendInfo>,
            Vec<FriendRequest>, // NOTE(review): order of the two request lists (received vs sent) — confirm
            Vec<FriendRequest>,
            Vec<BlacklistedPrincipal>,
            bool,
            Vec<PersistentNotification>,
            bool,
        ),
        ErrorCode,
    >;
    /// Updates the caller's presence information.
    #[method_id(13)]
    async fn update_presence(&self, presence: NintendoPresenceV2) -> Result<(), ErrorCode>;
    /// Updates the caller's privacy/visibility preferences.
    #[method_id(16)]
    async fn update_preference(&self, preference: PrincipalPreference) -> Result<(), ErrorCode>;
    /// Deletes the given persistent notifications from the server.
    #[method_id(18)]
    async fn delete_persistent_notification(
        &self,
        notifs: Vec<PersistentNotification>,
    ) -> Result<(), ErrorCode>;
    /// Returns a settings-status byte; value semantics not established here.
    #[method_id(19)]
    async fn check_setting_status(&self) -> Result<u8, ErrorCode>;
}

View file

@ -1,26 +0,0 @@
use macros::{method_id, rmc_proto};
use rnex_core::prudp::station_url::StationUrl;
use rnex_core::rmc::response::ErrorCode;
use rnex_core::PID;
use crate::rmc::structures::any::Any;
/// Matchmaking service (RMC protocol 21). `gid` parameters are gathering ids.
#[rmc_proto(21)]
pub trait Matchmake {
    /// Unregisters (tears down) the gathering; returns whether it succeeded.
    #[method_id(2)]
    async fn unregister_gathering(&self, gid: u32) -> Result<bool, ErrorCode>;
    /// Looks up a single gathering by id; returns a found-flag and the
    /// type-erased gathering data.
    #[method_id(21)]
    async fn find_by_single_id(&self, gid: u32) -> Result<(bool, Any), ErrorCode>;
    /// Returns the station URLs for connecting to the gathering's session.
    #[method_id(41)]
    async fn get_session_urls(&self, gid: u32) -> Result<Vec<StationUrl>, ErrorCode>;
    /// Makes the caller the session host; `change_owner` also transfers ownership.
    #[method_id(42)]
    async fn update_session_host(&self, gid: u32, change_owner: bool) -> Result<(), ErrorCode>;
    /// Migrates gathering ownership to one of `candidates`; `participants_only`
    /// restricts candidates to current participants — presumably; confirm.
    #[method_id(44)]
    async fn migrate_gathering_ownership(
        &self,
        gid: u32,
        candidates: Vec<PID>,
        participants_only: bool,
    ) -> Result<(), ErrorCode>;
}

View file

@ -1,94 +0,0 @@
use macros::{method_id, rmc_proto};
use rnex_core::rmc::response::ErrorCode;
use rnex_core::rmc::structures::matchmake::{
AutoMatchmakeParam, CreateMatchmakeSessionParam, JoinMatchmakeSessionParam, MatchmakeSession,
};
use crate::rmc::protocols::notifications::NotificationEvent;
use crate::rmc::structures::any::Any;
use crate::rmc::structures::matchmake::MatchmakeSessionSearchCriteria;
/// Matchmake extension service (RMC protocol 109). `gid` parameters are
/// gathering ids; `Any` values are type-erased gathering structures.
#[rmc_proto(109)]
pub trait MatchmakeExtension {
    /// Closes the gathering to new participants.
    #[method_id(1)]
    async fn close_participation(&self, gid: u32) -> Result<(), ErrorCode>;
    /// Reopens the gathering to new participants.
    #[method_id(2)]
    async fn open_participation(&self, gid: u32) -> Result<(), ErrorCode>;
    /// Creates a session from a type-erased gathering; returns the new gathering
    /// id and a byte buffer (session key — presumably; confirm).
    #[method_id(6)]
    async fn create_matchmake_session(
        &self,
        gathering: Any,
        message: String,
    ) -> Result<(u32, Vec<u8>), ErrorCode>;
    /// Stores notification data for the caller (type + two numeric params + string).
    #[method_id(9)]
    async fn update_notification_data(
        &self,
        ty: u32,
        param1: u32,
        param2: u32,
        str_param: String,
    ) -> Result<(), ErrorCode>;
    /// Fetches notification data of the given type for the caller's friends.
    #[method_id(10)]
    async fn get_friend_notification_data(
        &self,
        ty: i32,
    ) -> Result<Vec<NotificationEvent>, ErrorCode>;
    /// Auto-matchmakes against the search criteria, creating a session from
    /// `gathering` if no match is found — presumably; confirm.
    #[method_id(15)]
    async fn auto_matchmake_with_search_criteria_postpone(
        &self,
        criteria: Vec<MatchmakeSessionSearchCriteria>,
        gathering: Any,
        join_msg: String,
    ) -> Result<Any, ErrorCode>;
    /// Joins an existing session; returns a byte buffer (session key —
    /// presumably; confirm).
    #[method_id(30)]
    async fn join_matchmake_session_ex(
        &self,
        gid: u32,
        message: String,
        dont_care_block_list: bool,
        // this is to cheat support for v3-3-0
        //participation_count: u16,
    ) -> Result<Vec<u8>, ErrorCode>;
    /// Sets one attribute slot of the caller's current gathering.
    #[method_id(8)]
    async fn modify_current_game_attribute(
        &self,
        gid: u32,
        attrib_index: u32,
        attrib_val: u32,
    ) -> Result<(), ErrorCode>;
    /// Queries playing-session info for the given PIDs. The declared element
    /// type is `()` — NOTE(review): looks like a placeholder; confirm intended
    /// return structure.
    #[method_id(16)]
    async fn get_playing_session(&self, pids: Vec<u32>) -> Result<Vec<()>, ErrorCode>;
    /// Updates the session's progress score (only on NEX v3.5.0+ builds).
    #[method_id(34)]
    #[cfg(feature = "v3-5-0")]
    async fn update_progress_score(&self, gid: u32, progress: u8) -> Result<(), ErrorCode>;
    /// Creates a session from a structured parameter block.
    #[method_id(38)]
    async fn create_matchmake_session_with_param(
        &self,
        session: CreateMatchmakeSessionParam,
    ) -> Result<MatchmakeSession, ErrorCode>;
    /// Joins a session using a structured parameter block.
    #[method_id(39)]
    async fn join_matchmake_session_with_param(
        &self,
        session: JoinMatchmakeSessionParam,
    ) -> Result<MatchmakeSession, ErrorCode>;
    /// Auto-matchmakes using a structured parameter block.
    #[method_id(40)]
    async fn auto_matchmake_with_param_postpone(
        &self,
        session: AutoMatchmakeParam,
    ) -> Result<MatchmakeSession, ErrorCode>;
    /// Fetches full session details for a single gathering id.
    #[method_id(41)]
    async fn find_matchmake_session_by_gathering_id_detail(
        &self,
        gid: u32,
    ) -> Result<MatchmakeSession, ErrorCode>;
}

View file

@ -1,39 +0,0 @@
use macros::{RmcSerialize, method_id, rmc_proto};
use rnex_core::PID;
use rnex_core::rmc::structures::any::Any;
/// A notification event with a type tag, the sending principal, and a
/// type-erased payload (`Any`).
#[derive(RmcSerialize)]
#[rmc_struct(0)]
pub struct NintendoNotificationEvent {
    pub event_type: u32, // discriminates how `data` should be interpreted
    pub sender: PID, // principal that originated the event
    pub data: Any, // type-erased event payload
}
/// Generic notification payload: three numeric parameters and a string.
/// Parameter semantics depend on the event type and are not established here.
#[derive(RmcSerialize)]
#[rmc_struct(0)]
pub struct NintendoNotificationEventGeneral {
    pub param1: u32,
    pub param2: u64,
    pub param3: u64,
    pub str_param: String,
}
/// Profile-change notification payload (region/locale/platform bytes).
/// Value encodings are not established from this code.
#[derive(RmcSerialize)]
#[rmc_struct(0)]
pub struct NintendoNotificationEventProfile {
    pub region: u8,
    pub country: u8,
    pub area: u8,
    pub language: u8,
    pub platform: u8,
}
/// Nintendo notification service (RMC protocol 100). Declared `NoReturn`:
/// these are fire-and-forget notifications with no RMC response.
#[rmc_proto(100, NoReturn)]
pub trait NintendoNotification {
    /// Delivers a notification event (variant 1). The difference between the
    /// two variants is not established from this code.
    #[method_id(1)]
    async fn process_nintendo_notification_event_1(&self, notif: NintendoNotificationEvent);
    /// Delivers a notification event (variant 2).
    #[method_id(2)]
    async fn process_nintendo_notification_event_2(&self, notif: NintendoNotificationEvent);
}

View file

@ -1,41 +0,0 @@
use macros::{RmcSerialize, method_id, rmc_proto};
use rnex_core::PID;
/// Well-known values for `NotificationEvent::notif_type`.
pub mod notification_types {
    pub const OWNERSHIP_CHANGED: u32 = 4000;
    pub const HOST_CHANGED: u32 = 110000;
    pub const REQUEST_JOIN_GATHERING: u32 = 101;
    pub const END_GATHERING: u32 = 102;
}
// `NotificationEvent` has one extra trailing field on builds with the
// `third-notif-param` feature; both layouts are otherwise identical, so the
// whole definition is switched via cfg_if to keep the wire format per-edition.
cfg_if::cfg_if! {
    if #[cfg(feature = "third-notif-param")]{
        /// A generic notification event (extended layout with `param_3`).
        #[derive(RmcSerialize, Debug, Default, Clone)]
        #[rmc_struct(0)]
        pub struct NotificationEvent {
            pub pid_source: PID, // principal that generated the event
            pub notif_type: u32, // see `notification_types`
            pub param_1: PID,
            pub param_2: PID,
            pub str_param: String,
            pub param_3: PID,
        }
    } else {
        /// A generic notification event (base layout).
        #[derive(RmcSerialize, Debug, Default, Clone)]
        #[rmc_struct(0)]
        pub struct NotificationEvent {
            pub pid_source: PID, // principal that generated the event
            pub notif_type: u32, // see `notification_types`
            pub param_1: PID,
            pub param_2: PID,
            pub str_param: String,
        }
    }
}
/// Notification service (RMC protocol 14). Declared `NoReturn`: events are
/// delivered fire-and-forget with no RMC response.
#[rmc_proto(14, NoReturn)]
pub trait Notification {
    /// Delivers a single notification event to the peer.
    #[method_id(1)]
    async fn process_notification_event(&self, event: NotificationEvent);
}

View file

@ -1,50 +0,0 @@
use macros::{RmcSerialize, method_id, rmc_proto};
use rnex_core::kerberos::KerberosDateTime;
use rnex_core::rmc::structures::qbuffer::QBuffer;
use rnex_core::rmc::structures::resultsrange::ResultsRange;
use rnex_core::rmc::response::ErrorCode;
use rnex_core::rmc::structures::ranking::UploadCompetitionData;
/// Query parameters for fetching competition (festival) rankings.
#[derive(RmcSerialize, Debug, Default, Clone)]
#[rmc_struct(1)]
pub struct CompetitionRankingGetParam {
    pub unk: u32, // unknown — meaning not established from this code
    pub range: ResultsRange, // offset/count window of results to return
    pub festival_ids: Vec<u32>, // festivals to query
}
/// Ranking results for one festival: per-player scores plus per-team totals.
#[derive(RmcSerialize, Debug, Default, Clone)]
#[rmc_struct(0)]
pub struct CompetitionRankingScoreInfo {
    pub fest_id: u32, // festival these results belong to
    pub score_data: Vec<CompetitionRankingScoreData>,
    pub unk: u32, // unknown — meaning not established from this code
    pub team_wins: Vec<u32>, // per-team win counts — presumably; confirm
    pub team_votes: Vec<u32>, // per-team vote counts — presumably; confirm
}
/// One player's score entry within a competition ranking.
// NOTE(review): unlike its sibling structs this does not derive `Default` —
// presumably because `KerberosDateTime` lacks a `Default` impl; confirm.
#[derive(RmcSerialize, Debug, Clone)]
#[rmc_struct(0)]
pub struct CompetitionRankingScoreData {
    pub unk: u32, // unknown — meaning not established from this code
    pub pid: u32, // principal id of the scoring player
    pub score: u32,
    pub modified: KerberosDateTime, // when the score was last updated
    pub unk2: u8, // unknown
    pub appdata: QBuffer, // opaque title-specific payload
}
/// Ranking service (RMC protocol 112), covering the competition-ranking subset.
#[rmc_proto(112)]
pub trait Ranking {
    /// Fetches competition rankings for the festivals selected by `param`.
    #[method_id(16)]
    async fn competition_ranking_get_param(
        &self,
        param: CompetitionRankingGetParam,
    ) -> Result<Vec<CompetitionRankingScoreInfo>, ErrorCode>;
    /// Uploads a competition score; returns whether the upload was accepted.
    #[method_id(18)]
    async fn upload_competition_ranking_score(
        &self,
        param: UploadCompetitionData,
    ) -> Result<bool, ErrorCode>;
}

View file

@ -1,24 +0,0 @@
use macros::{method_id, rmc_proto};
use rnex_core::prudp::station_url::StationUrl;
use rnex_core::rmc::response::ErrorCode;
use rnex_core::rmc::structures::qresult::QResult;
use crate::rmc::structures::any::Any;
/// Secure-connection service (RMC protocol 11): registers a client's station
/// URLs with the secure server.
#[rmc_proto(11)]
pub trait Secure {
    /// Registers the client's station URLs; returns a result code, the
    /// assigned connection id, and the client's public station URL —
    /// presumably; confirm field semantics against protocol docs.
    #[method_id(1)]
    async fn register(
        &self,
        station_urls: Vec<StationUrl>,
    ) -> Result<(QResult, u32, StationUrl), ErrorCode>;
    /// Like `register`, with an extra type-erased login payload.
    #[method_id(4)]
    async fn register_ex(
        &self,
        station_urls: Vec<StationUrl>,
        data: Any,
    ) -> Result<(QResult, u32, StationUrl), ErrorCode>;
    /// Replaces a previously registered station URL with a new one.
    #[method_id(7)]
    async fn replace_url(&self, target: StationUrl, dest: StationUrl) -> Result<(), ErrorCode>;
}

View file

@ -1,9 +0,0 @@
use macros::{method_id, rmc_proto};
use rnex_core::rmc::response::ErrorCode;
/// Utility service (RMC protocol 110).
#[rmc_proto(110)]
pub trait Utility {
    /// Allocates and returns a server-unique 64-bit id.
    #[method_id(1)]
    async fn acquire_nex_unique_id(&self) -> Result<u64, ErrorCode>;
}

View file

@ -1,45 +0,0 @@
use rnex_core::rmc::structures::{Result, RmcSerialize};
use std::io::{Cursor, Read, Write};
use v_byte_helpers::{IS_BIG_ENDIAN, ReadExtensions};
/// Type-erased RMC value: the concrete type's name paired with its serialized
/// bytes. Use `Any::new` / `Any::try_get` to convert to and from typed values.
#[derive(Debug, Default, Clone)]
pub struct Any {
    pub name: String, // type name, matched against `T::name()` on extraction
    pub data: Vec<u8>, // the value's serialized payload
}
impl RmcSerialize for Any {
    /// Wire layout: type-name string, then a u32 "outer" length, then the
    /// payload buffer (which carries its own u32 length prefix via the
    /// `Vec<u8>` serializer).
    fn serialize(&self, writer: &mut impl Write) -> Result<()> {
        self.name.serialize(writer)?;
        let u32_len = self.data.len() as u32;
        // outer length = payload bytes + 4 bytes for the buffer's own length prefix
        (u32_len + 4).serialize(writer)?;
        self.data.serialize(writer)?;
        Ok(())
    }
    fn deserialize(reader: &mut impl Read) -> Result<Self> {
        let name = String::deserialize(reader)?;
        // outer length — redundant with the buffer's own prefix, so it is read
        // and discarded rather than validated
        let _len2: u32 = reader.read_struct(IS_BIG_ENDIAN)?;
        let data = Vec::deserialize(reader)?;
        Ok(Any { name, data })
    }
}
impl Any {
    /// Attempt to extract the payload as a `T`.
    ///
    /// Returns `None` when the stored type name does not match `T::name()`;
    /// otherwise the (possibly failed) deserialization result.
    pub fn try_get<T: RmcSerialize>(&self) -> Option<Result<T>> {
        (self.name == T::name())
            .then(|| T::deserialize(&mut Cursor::new(self.data.as_slice())))
    }

    /// Type-erase `val`, recording its type name next to its serialized bytes.
    pub fn new<T: RmcSerialize>(val: &T) -> Result<Self> {
        Ok(Self {
            name: T::name().to_owned(),
            data: val.to_data()?,
        })
    }
}

View file

@ -1,36 +0,0 @@
use crate::rmc::structures::Result;
use crate::rmc::structures::RmcSerialize;
use std::io::{Read, Write};
impl<'a> RmcSerialize for &'a [u8] {
    /// Wire layout: u32 byte count followed by the raw bytes.
    fn serialize(&self, writer: &mut impl Write) -> Result<()> {
        let u32_size = self.len() as u32;
        // write_all, not write: `Write::write` may perform a short write and
        // the previous code ignored its return value, silently dropping bytes.
        writer.write_all(bytemuck::bytes_of(&u32_size))?;
        writer.write_all(self)?;
        Ok(())
    }
    /// DO NOT USE (also maybe split off the serialize and deserialize functions at some point)
    fn deserialize(_reader: &mut impl Read) -> Result<Self> {
        panic!("cannot deserialize to a u8 slice reference (use this ONLY for writing)")
    }
    fn serialize_write_size(&self) -> Result<u32> {
        // 4-byte length prefix + payload
        Ok(4 + self.len() as u32)
    }
}
impl RmcSerialize for Box<[u8]> {
    /// Delegates to the `&[u8]` impl: u32 length prefix + raw bytes.
    fn serialize(&self, writer: &mut impl Write) -> Result<()> {
        (&**self).serialize(writer)
    }
    /// Reads a length-prefixed `Vec<u8>` and converts it into a boxed slice.
    fn deserialize(reader: &mut impl Read) -> Result<Self> {
        let bytes = Vec::deserialize(reader)?;
        Ok(bytes.into_boxed_slice())
    }
    fn serialize_write_size(&self) -> Result<u32> {
        (&**self).serialize_write_size()
    }
}

View file

@ -1,19 +0,0 @@
use macros::RmcSerialize;
use rnex_core::kerberos::KerberosDateTime;
/// Connection data returned during session setup (structure version 1),
/// including the server timestamp.
#[derive(Debug, RmcSerialize)]
#[rmc_struct(1)]
pub struct ConnectionData {
    pub station_url: String, // primary station URL to connect to
    pub special_protocols: Vec<u8>,
    pub special_station_url: String,
    pub date_time: KerberosDateTime, // server time — distinguishes this from `ConnectionDataOld`
}
/// Legacy connection data layout: identical to `ConnectionData` but without
/// the trailing timestamp.
#[derive(Debug, RmcSerialize)]
#[rmc_struct(1)]
pub struct ConnectionDataOld {
    pub station_url: String, // primary station URL to connect to
    pub special_protocols: Vec<u8>,
    pub special_station_url: String,
}

View file

@ -1,5 +0,0 @@
use macros::RmcSerialize;
/// Empty base structure that other RMC structures `#[extends]`; carries no
/// fields of its own.
#[derive(RmcSerialize, Debug, Clone, Copy)]
#[rmc_struct(0)]
pub struct Data {}

View file

@ -1,48 +0,0 @@
use std::{fmt, io};
/// `fmt::Write` sink that discards its input and only counts bytes written.
/// Used to pre-compute the serialized size of formatted strings.
pub struct DummyFormatWriter(u32);

impl fmt::Write for DummyFormatWriter {
    fn write_str(&mut self, s: &str) -> fmt::Result {
        // `str::len` is the UTF-8 byte length, which is what gets serialized
        self.0 += s.len() as u32;
        Ok(())
    }
}

impl DummyFormatWriter {
    /// Fresh counter starting at zero bytes.
    pub const fn new() -> Self {
        Self(0)
    }

    /// Serialized size of the counted text as an RMC string:
    /// presumably a 2-byte length prefix + body + 1 terminator byte — confirm
    /// against the string serializer.
    pub const fn serialize_str_len(&self) -> u32 {
        2 + self.0 + 1
    }
}
/// `io::Write` sink that discards its input and tracks the total number of
/// bytes written. Used to measure serialized sizes without allocating.
pub struct DummyWriter(u32);

impl io::Write for DummyWriter {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let written = buf.len();
        self.0 += written as u32;
        Ok(written)
    }

    // overridden so `write_all` is a single counter bump instead of looping
    fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
        self.0 += buf.len() as u32;
        Ok(())
    }

    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}

impl DummyWriter {
    /// Fresh counter starting at zero bytes.
    pub const fn new() -> Self {
        Self(0)
    }

    /// Total number of bytes "written" so far.
    pub const fn get_total_len(&self) -> u32 {
        self.0
    }
}
/// Runs `f` against a counting sink and returns how many bytes it wrote.
///
/// Errors from `f` are deliberately ignored: the count accumulated up to the
/// failure point is returned.
pub fn len_of_write(f: impl FnOnce(&mut DummyWriter) -> anyhow::Result<()>) -> u32 {
    let mut counter = DummyWriter::new();
    let _ = f(&mut counter);
    counter.get_total_len()
}

View file

@ -1,74 +0,0 @@
use crate::rmc::structures::RmcSerialize;
use bytemuck::bytes_of;
use std::io::{Read, Write};
use std::mem::MaybeUninit;
use v_byte_helpers::{IS_BIG_ENDIAN, ReadExtensions};
// this is also for implementing `Buffer` this is tecnically not the same as its handled internaly
// probably but as it has the same mapping it doesn't matter and simplifies things
impl<T: RmcSerialize> RmcSerialize for Vec<T> {
    /// Wire layout: u32 element count followed by each element's serialization.
    fn serialize(&self, writer: &mut impl Write) -> crate::rmc::structures::Result<()> {
        let u32_len = self.len() as u32;
        writer.write_all(bytes_of(&u32_len))?;
        for e in self {
            e.serialize(writer)?;
        }
        Ok(())
    }
    /// Reads the u32 element count, then exactly that many elements.
    ///
    /// (Removed leftover debug `println!`s that logged every list read, and
    /// the dead commented-out `with_capacity` line.)
    fn deserialize(reader: &mut impl Read) -> crate::rmc::structures::Result<Self> {
        let len: u32 = reader.read_struct(IS_BIG_ENDIAN)?;
        (0..len).map(|_| T::deserialize(reader)).collect()
    }
    /// 4-byte count prefix plus the summed size of all elements.
    fn serialize_write_size(&self) -> crate::rmc::structures::Result<u32> {
        let mut total = 0u32;
        for e in self {
            total += e.serialize_write_size()?;
        }
        Ok(4 + total)
    }
}
impl<const LEN: usize, T: RmcSerialize> RmcSerialize for [T; LEN] {
    /// Fixed-size arrays are written as their elements back to back with no
    /// length prefix — the length is part of the type.
    fn serialize(&self, writer: &mut impl Write) -> crate::rmc::structures::Result<()> {
        for e in self {
            e.serialize(writer)?;
        }
        Ok(())
    }
    /// Reads exactly `LEN` elements.
    ///
    /// Elements are collected into a `Vec` first so that a mid-stream failure
    /// drops the already-deserialized values. The previous `MaybeUninit`
    /// implementation returned early via `?` without ever dropping the
    /// partially-initialized slots, leaking every element read before the
    /// error.
    fn deserialize(reader: &mut impl Read) -> crate::rmc::structures::Result<Self> {
        let mut elems = Vec::with_capacity(LEN);
        for _ in 0..LEN {
            elems.push(T::deserialize(reader)?);
        }
        match <[T; LEN]>::try_from(elems) {
            Ok(arr) => Ok(arr),
            // the Vec holds exactly LEN elements by construction
            Err(_) => unreachable!("vec length equals LEN"),
        }
    }
    /// Summed size of all elements (no prefix).
    #[inline(always)]
    fn serialize_write_size(&self) -> crate::rmc::structures::Result<u32> {
        let mut total = 0u32;
        for e in self {
            total += e.serialize_write_size()?;
        }
        Ok(total)
    }
}

Some files were not shown because too many files have changed in this diff Show more