diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..784088f
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,11 @@
+# CMake build directories
+build/
+_codeql_build_dir/
+_codeql_detected_source_root
+
+# Compiled binaries
+*.o
+*.a
+*.so
+*.out
+neural_demo
diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 0000000..998e238
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,15 @@
+cmake_minimum_required(VERSION 3.14)
+project(NatureRealityEngine CXX)
+
+set(CMAKE_CXX_STANDARD 17)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+
+# Neural network library
+add_library(nre_neural
+    engine/neural/NeuralNetwork.cpp
+)
+target_include_directories(nre_neural PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
+
+# Neural network demo
+add_executable(neural_demo examples/neural_demo/main.cpp)
+target_link_libraries(neural_demo PRIVATE nre_neural)
diff --git a/engine/neural/NeuralNetwork.cpp b/engine/neural/NeuralNetwork.cpp
new file mode 100644
index 0000000..6b715f3
--- /dev/null
+++ b/engine/neural/NeuralNetwork.cpp
@@ -0,0 +1,83 @@
+#include "NeuralNetwork.h"
+
+#include <algorithm>
+#include <cmath>
+#include <stdexcept>
+#include <utility>
+
+namespace NRE {
+
+NeuralNetwork::NeuralNetwork(uint32_t seed) {
+    if (seed == 0) {
+        std::random_device rd;
+        seed_ = rd();
+    } else {
+        seed_ = seed;
+    }
+    rng_.seed(seed_);
+}
+
+void NeuralNetwork::SetSeed(uint32_t seed) {
+    if (seed == 0) {
+        std::random_device rd;
+        seed_ = rd();
+    } else {
+        seed_ = seed;
+    }
+    rng_.seed(seed_);
+    InitialiseWeights();
+}
+
+uint32_t NeuralNetwork::GetSeed() const {
+    return seed_;
+}
+
+void NeuralNetwork::AddLayer(int inputSize, int outputSize) {
+    if (inputSize <= 0 || outputSize <= 0) {
+        throw std::invalid_argument("Layer dimensions must be positive");
+    }
+    layers_.push_back({inputSize, outputSize,
+                       std::vector<float>(outputSize * inputSize),
+                       std::vector<float>(outputSize, 0.0f)});
+    // He initialisation for this layer using the seeded RNG
+    float stddev = std::sqrt(2.0f / static_cast<float>(inputSize));
+    std::normal_distribution<float> dist(0.0f, stddev);
+    Layer& layer = layers_.back();
+    for (float& w : layer.weights) {
+        w = dist(rng_);
+    }
+}
+
+std::vector<float> NeuralNetwork::Forward(const std::vector<float>& input) const {
+    std::vector<float> activation = input;
+    for (const Layer& layer : layers_) {
+        if (static_cast<int>(activation.size()) != layer.inputSize) {
+            throw std::runtime_error("Input size does not match layer dimensions");
+        }
+        std::vector<float> output(layer.outputSize, 0.0f);
+        for (int o = 0; o < layer.outputSize; ++o) {
+            float sum = layer.biases[o];
+            for (int i = 0; i < layer.inputSize; ++i) {
+                sum += layer.weights[o * layer.inputSize + i] * activation[i];
+            }
+            // ReLU activation
+            output[o] = std::max(0.0f, sum);
+        }
+        activation = std::move(output);
+    }
+    return activation;
+}
+
+void NeuralNetwork::InitialiseWeights() {
+    rng_.seed(seed_);
+    for (Layer& layer : layers_) {
+        float stddev = std::sqrt(2.0f / static_cast<float>(layer.inputSize));
+        std::normal_distribution<float> dist(0.0f, stddev);
+        for (float& w : layer.weights) {
+            w = dist(rng_);
+        }
+        std::fill(layer.biases.begin(), layer.biases.end(), 0.0f);
+    }
+}
+
+} // namespace NRE
diff --git a/engine/neural/NeuralNetwork.h b/engine/neural/NeuralNetwork.h
new file mode 100644
index 0000000..ed23def
--- /dev/null
+++ b/engine/neural/NeuralNetwork.h
@@ -0,0 +1,43 @@
+#pragma once
+
+#include <cstdint>
+#include <random>
+#include <vector>
+
+namespace NRE {
+
+// A fully-connected feedforward neural network with seeded weight initialization
+// for reproducible results in procedural generation.
+class NeuralNetwork {
+public:
+    // Construct with an optional seed (default 0 = use std::random_device)
+    explicit NeuralNetwork(uint32_t seed = 0);
+
+    // Set (or reset) the RNG seed and reinitialise all weights
+    void SetSeed(uint32_t seed);
+
+    // Return the seed currently in use
+    uint32_t GetSeed() const;
+
+    // Append a fully-connected layer (inputSize x outputSize weights + bias)
+    void AddLayer(int inputSize, int outputSize);
+
+    // Run a forward pass and return the output activations
+    std::vector<float> Forward(const std::vector<float>& input) const;
+
+private:
+    void InitialiseWeights();
+
+    struct Layer {
+        int inputSize;
+        int outputSize;
+        std::vector<float> weights;  // row-major [outputSize][inputSize]
+        std::vector<float> biases;   // [outputSize]
+    };
+
+    uint32_t seed_;
+    std::mt19937 rng_;
+    std::vector<Layer> layers_;
+};
+
+} // namespace NRE
diff --git a/examples/neural_demo/main.cpp b/examples/neural_demo/main.cpp
new file mode 100644
index 0000000..1e7eb27
--- /dev/null
+++ b/examples/neural_demo/main.cpp
@@ -0,0 +1,46 @@
+#include <cstdint>
+#include <iostream>
+#include <vector>
+
+#include "../../engine/neural/NeuralNetwork.h"
+
+// Demonstrates seeded neural network for reproducible procedural generation.
+// Running with the same seed always produces identical outputs.
+int main() {
+    const uint32_t seed = 42;
+
+    // Build a small network: 4 inputs -> 8 hidden -> 4 outputs
+    NRE::NeuralNetwork nn(seed);
+    nn.AddLayer(4, 8);
+    nn.AddLayer(8, 4);
+
+    std::vector<float> input = {0.5f, 0.3f, 0.8f, 0.1f};
+
+    std::cout << "Seed: " << nn.GetSeed() << "\n";
+    std::cout << "Forward pass output:\n  [";
+    auto output = nn.Forward(input);
+    for (size_t i = 0; i < output.size(); ++i) {
+        std::cout << output[i];
+        if (i + 1 < output.size()) std::cout << ", ";
+    }
+    std::cout << "]\n";
+
+    // Verify reproducibility: rebuild with the same seed
+    NRE::NeuralNetwork nn2(seed);
+    nn2.AddLayer(4, 8);
+    nn2.AddLayer(8, 4);
+    auto output2 = nn2.Forward(input);
+
+    bool reproducible = (output == output2);
+    std::cout << "Reproducible with same seed: " << (reproducible ? "YES" : "NO") << "\n";
+
+    // Show that a different seed produces different weights
+    NRE::NeuralNetwork nn3(seed + 1);
+    nn3.AddLayer(4, 8);
+    nn3.AddLayer(8, 4);
+    auto output3 = nn3.Forward(input);
+    std::cout << "Different seed produces different output: "
+              << (output != output3 ? "YES" : "NO") << "\n";
+
+    return 0;
+}
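Note (illustrative sketch, not part of the diff above): because SetSeed() reseeds the RNG and then calls InitialiseWeights(), re-applying a network's original seed should restore the exact weights it had at construction, so Forward() produces the same outputs again. The snippet below assumes it is compiled against the nre_neural target, which exposes the repository root on the include path; the file name setseed_check.cpp is hypothetical.

    // setseed_check.cpp -- hypothetical file, not included in this change.
    #include <cassert>
    #include <vector>
    #include "engine/neural/NeuralNetwork.h"

    int main() {
        NRE::NeuralNetwork nn(1234);
        nn.AddLayer(4, 8);
        nn.AddLayer(8, 4);

        std::vector<float> input = {0.2f, 0.4f, 0.6f, 0.8f};
        auto before = nn.Forward(input);

        nn.SetSeed(7);     // different seed: weights are reinitialised differently
        nn.SetSeed(1234);  // restore the original seed: weights are rebuilt from it
        auto after = nn.Forward(input);

        // Expected to hold given the seeded He initialisation in InitialiseWeights()
        assert(before == after);
        return 0;
    }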