changeset 1427:676fc13685a9
did: restructure files and imports
| author | sam <sam@basx.dev> |
| --- | --- |
| date | Sat, 11 Jan 2025 14:04:39 +0700 |
| parents | 075e4197bc95 |
| children | d16964858790 |
| files | semicongine.nim semicongine/audio.nim semicongine/audio/generators.nim semicongine/audio/mixer_module.nim semicongine/audio/platform/linux.nim semicongine/audio/platform/windows.nim semicongine/audio/resources.nim semicongine/background_loader.nim semicongine/background_loaders.nim semicongine/core/types.nim semicongine/font.nim semicongine/gltf.nim semicongine/image.nim semicongine/loaders.nim semicongine/platform/linux/audio.nim semicongine/platform/windows/audio.nim semicongine/rendering.nim semicongine/rendering/platform/linux.nim semicongine/rendering/platform/windows.nim semicongine/text/font.nim tests/test_audio.nim tests/test_gltf.nim tests/test_rendering.nim tests/test_storage.nim tests/test_text.nim |
| diffstat | 25 files changed, 958 insertions(+), 997 deletions(-) |
--- a/semicongine.nim Sat Jan 11 12:22:21 2025 +0700
+++ b/semicongine.nim Sat Jan 11 14:04:39 2025 +0700
@@ -1,68 +1,13 @@
+import std/tables
+import std/locks
+
 import ./semicongine/core
 export core
 
-import ./semicongine/resources
-export resources
-
+from ./semicongine/rendering import initVulkan
+from ./semicongine/audio import audioWorker
+from ./semicongine/background_loaders import initBackgroundLoader
 import ./semicongine/loaders
-export loaders
-
-import ./semicongine/background_loader
-export background_loader
-
-import ./semicongine/image
-export image
-
-import ./semicongine/rendering
-export rendering
-
-import ./semicongine/rendering/renderer
-export renderer
-
-import ./semicongine/rendering/swapchain
-export swapchain
-
-import ./semicongine/rendering/renderpasses
-export renderpasses
-
-import ./semicongine/rendering/shaders
-export shaders
-
-import ./semicongine/rendering/memory
-export memory
-
-import ./semicongine/rendering/vulkan_wrappers
-export vulkan_wrappers
-
-import ./semicongine/storage
-import ./semicongine/input
-export storage
-export input
-
-import ./semicongine/audio
-export audio
-
-# texture packing is required for font atlas
-import ./semicongine/text/font
-export font
-
-import ./semicongine/text
-export text
-
-import ./semicongine/gltf
-export gltf
-
-when not defined(WITHOUT_CONTRIB):
-  import ./semicongine/contrib/steam
-  import ./semicongine/contrib/settings
-  import ./semicongine/contrib/algorithms/texture_packing
-  import ./semicongine/contrib/algorithms/collision
-  import ./semicongine/contrib/algorithms/noise
-  export steam
-  export settings
-  export texture_packing
-  export collision
-  export noise
 
 #### Main engine object
@@ -72,9 +17,10 @@
 
   # start audio
   engine_obj_internal.mixer = createShared(Mixer)
-  engine_obj_internal.mixer[] = initMixer()
+  engine_obj_internal.mixer[] = Mixer()
+  engine_obj_internal.mixer[].tracks[""] = Track(level: 1)
+  engine_obj_internal.mixer[].lock.initLock()
   engine_obj_internal.audiothread.createThread(audioWorker, engine_obj_internal.mixer)
-  engine_obj_internal.initialized = true
 
   engine_obj_internal.rawLoader = initBackgroundLoader(loadBytes)
   engine_obj_internal.jsonLoader = initBackgroundLoader(loadJson)
@@ -83,5 +29,4 @@
   engine_obj_internal.imageLoader = initBackgroundLoader(loadImage[BGRA])
   engine_obj_internal.audioLoader = initBackgroundLoader(loadAudio)
 
-proc setEngine*(e: Engine) =
-  engine_obj_internal = e
+  engine_obj_internal.initialized = true
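The top-level module now imports only the symbols it actually needs (`from ./semicongine/rendering import initVulkan`, etc.) and builds the mixer inline instead of calling the removed `initMixer()`. For reference, here is a minimal standalone sketch of that inline setup; it assumes the `Mixer`/`Track` types from `./semicongine/core` (not shown in this changeset) with `tracks`, `level` and `lock` fields:

```nim
import std/locks
import std/tables

# Sketch only: mirrors the inline initialization performed in the engine init above.
var mixer = Mixer()
mixer.tracks[""] = Track(level: 1)  # the default, unnamed track
mixer.lock.initLock()               # guards tracks/playbacks against the audio thread
```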
--- a/semicongine/audio.nim Sat Jan 11 12:22:21 2025 +0700 +++ b/semicongine/audio.nim Sat Jan 11 14:04:39 2025 +0700 @@ -1,8 +1,424 @@ -import ./audio/mixer_module -export mixer_module +import std/endians +import std/locks +import std/logging +import std/math +import std/monotimes +import std/os +import std/strformat +import std/tables +import std/times +import std/streams + +import ./core + +const NBUFFERS = 32 +# it seems that some alsa hardware has a problem with smaller buffers than 512 +when defined(linux): + const BUFFERSAMPLECOUNT = 512 +else: + const BUFFERSAMPLECOUNT = 256 + +when defined(windows): + include ./platform/windows/audio +when defined(linux): + include ./platform/linux/audio + +# TODO: this should probably be in the load-code-stuff +# proc LoadSound*(mixer: var Mixer, name: string, resource: string) = +# assert not (name in mixer.sounds) +# mixer.sounds[name] = LoadAudio(resource) + +proc addSound*(name: string, sound: SoundData) = + if name in engine().mixer.sounds: + warn "sound with name '", name, "' was already loaded, overwriting" + engine().mixer.sounds[name] = sound + +proc addTrack*(name: string, level: AudioLevel = 1'f) = + if name in engine().mixer.tracks: + warn "track with name '", name, "' was already loaded, overwriting" + engine().mixer.lock.withLock: + engine().mixer.tracks[name] = Track(level: level) + +proc play*( + soundName: string, + track = "", + stopOtherSounds = false, + loop = false, + levelLeft, levelRight: AudioLevel, +): uint64 = + assert track in engine().mixer.tracks, &"Track '{track}' does not exists" + assert soundName in engine().mixer.sounds, soundName & " not loaded" + engine().mixer.lock.withLock: + if stopOtherSounds: + engine().mixer.tracks[track].playing.clear() + engine().mixer.tracks[track].playing[engine().mixer.playbackCounter] = Playback( + sound: engine().mixer.sounds[soundName], + position: 0, + loop: loop, + levelLeft: levelLeft, + levelRight: levelRight, + paused: false, + ) + result = engine().mixer.playbackCounter + inc engine().mixer.playbackCounter + +proc play*( + soundName: string, + track = "", + stopOtherSounds = false, + loop = false, + level: AudioLevel = 1'f, +): uint64 = + play( + soundName = soundName, + track = track, + stopOtherSounds = stopOtherSounds, + loop = loop, + levelLeft = level, + levelRight = level, + ) + +proc stop*() = + engine().mixer.lock.withLock: + for track in engine().mixer.tracks.mvalues: + track.playing.clear() + +proc getLevel*(): AudioLevel = + engine().mixer.level + +proc getLevel*(track: string): AudioLevel = + engine().mixer.tracks[track].level + +proc getLevel*(playbackId: uint64): (AudioLevel, AudioLevel) = + for track in engine().mixer.tracks.mvalues: + if playbackId in track.playing: + return (track.playing[playbackId].levelLeft, track.playing[playbackId].levelRight) + +proc setLevel*(level: AudioLevel) = + engine().mixer.level = level + +proc setLevel*(track: string, level: AudioLevel) = + engine().mixer.lock.withLock: + engine().mixer.tracks[track].level = level + +proc setLevel*(playbackId: uint64, levelLeft, levelRight: AudioLevel) = + engine().mixer.lock.withLock: + for track in engine().mixer.tracks.mvalues: + if playbackId in track.playing: + track.playing[playbackId].levelLeft = levelLeft + track.playing[playbackId].levelRight = levelRight + +proc setLevel*(playbackId: uint64, level: AudioLevel) = + setLevel(playbackId, level, level) + +proc stop*(track: string) = + assert track in engine().mixer.tracks + engine().mixer.lock.withLock: + 
engine().mixer.tracks[track].playing.clear() + +proc stop*(playbackId: uint64) = + engine().mixer.lock.withLock: + for track in engine().mixer.tracks.mvalues: + if playbackId in track.playing: + track.playing.del(playbackId) + break + +proc pause*(value: bool) = + engine().mixer.lock.withLock: + for track in engine().mixer.tracks.mvalues: + for playback in track.playing.mvalues: + playback.paused = value + +proc pause*(track: string, value: bool) = + engine().mixer.lock.withLock: + for playback in engine().mixer.tracks[track].playing.mvalues: + playback.paused = value + +proc pause*(playbackId: uint64, value: bool) = + engine().mixer.lock.withLock: + for track in engine().mixer.tracks.mvalues: + if playbackId in track.playing: + track.playing[playbackId].paused = value + +proc pause*() = + pause(true) + +proc pause*(track: string) = + pause(track, true) + +proc pause*(playbackId: uint64) = + pause(playbackId, true) + +proc unpause*() = + pause(false) + +proc unpause*(track: string) = + pause(track, false) + +proc unpause*(playbackId: uint64) = + pause(playbackId, false) + +proc fadeTo*(track: string, level: AudioLevel, time: float) = + engine().mixer.tracks[track].targetLevel = level + engine().mixer.tracks[track].fadeTime = time + engine().mixer.tracks[track].fadeStep = + level.float - engine().mixer.tracks[track].level.float / time + +proc isPlaying*(): bool = + engine().mixer.lock.withLock: + for track in engine().mixer.tracks.mvalues: + for playback in track.playing.values: + if not playback.paused: + return true + return false + +proc isPlaying*(track: string): bool = + engine().mixer.lock.withLock: + if engine().mixer.tracks.contains(track): + for playback in engine().mixer.tracks[track].playing.values: + if not playback.paused: + return true + return false + +func applyLevel(sample: Sample, levelLeft, levelRight: AudioLevel): Sample = + [int16(float(sample[0]) * levelLeft), int16(float(sample[1]) * levelRight)] + +func clip(value: int32): int16 = + int16(max(min(int32(high(int16)), value), int32(low(int16)))) + +# used for combining sounds +func mix(a, b: Sample): Sample = + [clip(int32(a[0]) + int32(b[0])), clip(int32(a[1]) + int32(b[1]))] + +proc updateSoundBuffer*(mixer: var Mixer) = + let t = getMonoTime() + + let dt = (t - mixer.lastUpdate).inNanoseconds.float64 / 1_000_000_000'f64 + mixer.lastUpdate = t -import ./audio/generators -export generators + # update fadings + mixer.lock.withLock: + for track in mixer.tracks.mvalues: + if track.fadeTime > 0: + track.fadeTime -= dt + track.level = (track.level.float64 + track.fadeStep.float64 * dt).clamp( + AudioLevel.low, AudioLevel.high + ) + if track.fadeTime <= 0: + track.level = track.targetLevel + # mix + var hasData = false + for i in 0 ..< mixer.buffers[mixer.currentBuffer].len: + var mixedSample = [0'i16, 0'i16] + mixer.lock.withLock: + for track in mixer.tracks.mvalues: + var stoppedSounds: seq[uint64] + for (id, playback) in track.playing.mpairs: + if playback.paused: + continue + let sample = applyLevel( + playback.sound[playback.position], + mixer.level * track.level * playback.levelLeft, + mixer.level * track.level * playback.levelRight, + ) + mixedSample = mix(mixedSample, sample) + hasData = true + inc playback.position + if playback.position >= playback.sound.len: + if playback.loop: + playback.position = 0 + else: + stoppedSounds.add id + for id in stoppedSounds: + track.playing.del(id) + mixer.buffers[mixer.currentBuffer][i] = mixedSample + # send data to sound device + if hasData: + 
mixer.device.WriteSoundData(mixer.currentBuffer) + mixer.currentBuffer = (mixer.currentBuffer + 1) mod mixer.buffers.len + +# DSP functions +# TODO: finish implementation, one day + +#[ +# +proc lowPassFilter(data: var SoundData, cutoff: int) = + let alpha = float(cutoff) / AUDIO_SAMPLE_RATE + var value = data[0] + for i in 0 ..< data.len: + value[0] += int16(alpha * float(data[i][0] - value[0])) + value[1] += int16(alpha * float(data[i][1] - value[1])) + data[i] = value + + proc downsample(data: var SoundData, n: int) = + let newLen = (data.len - 1) div n + 1 + for i in 0 ..< newLen: + data[i] = data[i * n] + data.setLen(newLen) + + proc upsample(data: var SoundData, m: int) = + data.setLen(data.len * m) + var i = data.len - 1 + while i < 0: + if i mod m == 0: + data[i] = data[i div m] + else: + data[i] = [0, 0] + i.dec + + proc slowdown(data: var SoundData, m, n: int) = + data.upsample(m) + # TODO + # data.lowPassFilter(m) + data.downsample(n) + + ]# + +proc setupDevice(mixer: var Mixer) = + # call this inside audio thread + var bufferaddresses: seq[ptr SoundData] + for i in 0 ..< NBUFFERS: + mixer.buffers.add newSeq[Sample](BUFFERSAMPLECOUNT) + for i in 0 ..< mixer.buffers.len: + bufferaddresses.add (addr mixer.buffers[i]) + mixer.device = OpenSoundDevice(AUDIO_SAMPLE_RATE, bufferaddresses) + +proc destroy(mixer: var Mixer) = + mixer.lock.deinitLock() + mixer.device.CloseSoundDevice() + +proc audioWorker*(mixer: ptr Mixer) {.thread.} = + mixer[].setupDevice() + onThreadDestruction( + proc() = + mixer[].lock.withLock(mixer[].destroy()) + freeShared(mixer) + ) + while true: + mixer[].updateSoundBuffer() + +# audio-generator helpers + +proc sinewave(f: float): proc(x: float): float = + proc ret(x: float): float = + sin(x * 2 * Pi * f) + + result = ret -import ./audio/resources -export resources +proc sineSoundData*(f: float, len: float, rate: int, amplitude = 0.5'f32): SoundData = + let dt = 1'f / float(rate) + var sine = sinewave(f) + for i in 0 ..< int(float(rate) * len): + let t = dt * float(i) + let value = int16(sine(t) * float(high(int16)) * amplitude) + result.add [value, value] + +# loaders + +type + Encoding {.size: sizeof(uint32).} = enum + # Unspecified = 0 + # Uint8Ulaw = 1 + # Int8 = 2 + Int16 = 3 + # Int24 = 4 + # Int32 = 5 + # Float32 = 6 + # Float64 = 7 + + AuHeader = object + magicNumber: uint32 + dataOffset: uint32 + dataSize: uint32 + encoding: Encoding + sampleRate: uint32 + channels: uint32 + +proc readSample(stream: Stream, encoding: Encoding, channels: int): Sample = + result[0] = stream.readint16() + swapEndian16(addr result[0], addr result[0]) + + if channels == 2: + result[1] = stream.readint16() + swapEndian16(addr result[1], addr result[1]) + else: + result[1] = result[0] + +# https://en.wikipedia.org/wiki/Au_file_format +proc readAU*(stream: Stream): SoundData = + var header: AuHeader + + for name, value in fieldPairs(header): + var bytes: array[4, uint8] + stream.read(bytes) + swap(bytes[0], bytes[3]) + swap(bytes[1], bytes[2]) + value = cast[typeof(value)](bytes) + + assert header.magicNumber == 0x2e736e64 + if header.sampleRate != AUDIO_SAMPLE_RATE: + raise newException( + Exception, + &"Only support sample rate of {AUDIO_SAMPLE_RATE} Hz but got {header.sampleRate} Hz, please resample (e.g. 
ffmpeg -i <infile> -ar {AUDIO_SAMPLE_RATE} <outfile>)", + ) + if not (header.channels in [1'u32, 2'u32]): + raise newException( + Exception, + "Only support mono and stereo audio at the moment (1 or 2 channels), but found " & + $header.channels, + ) + + var annotation: string + stream.read(annotation) + + stream.setPosition(int(header.dataOffset)) + while not stream.atEnd(): + result.add stream.readSample(header.encoding, int(header.channels)) + +{.compile: currentSourcePath.parentDir() & "/thirdparty/stb/stb_vorbis.c".} + +proc stb_vorbis_decode_memory( + mem: pointer, + len: cint, + channels: ptr cint, + sample_rate: ptr cint, + output: ptr ptr cshort, +): cint {.importc.} + +proc readVorbis*(stream: Stream): SoundData = + var + data = stream.readAll() + channels: cint + sampleRate: cint + output: ptr cshort + + var nSamples = stb_vorbis_decode_memory( + addr data[0], cint(data.len), addr channels, addr sampleRate, addr output + ) + + if nSamples < 0: + raise newException( + Exception, &"Unable to read ogg/vorbis sound file, error code: {nSamples}" + ) + if sampleRate != AUDIO_SAMPLE_RATE: + raise newException( + Exception, + &"Only support sample rate of {AUDIO_SAMPLE_RATE} Hz but got {sampleRate} Hz, please resample (e.g. ffmpeg -i <infile> -acodec libvorbis -ar {AUDIO_SAMPLE_RATE} <outfile>)", + ) + + if channels == 2: + result.setLen(int(nSamples)) + copyMem(addr result[0], output, nSamples * sizeof(Sample)) + nativeFree(output) + elif channels == 1: + for i in 0 ..< nSamples: + let value = cast[ptr UncheckedArray[int16]](output)[i] + result.add [value, value] + nativeFree(output) + else: + nativeFree(output) + raise newException( + Exception, + "Only support mono and stereo audio at the moment (1 or 2 channels), but found " & + $channels, + )
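The merged `audio.nim` now exposes the whole mixer API (`addSound`, `addTrack`, `play`, `setLevel`, `fadeTo`, …) from one module. A hedged usage sketch, assuming the engine (and with it the audio worker thread) has already been initialized; the sound and track names are made up:

```nim
# Generate half a second of 440 Hz, register it, and play it on a custom track.
let beep = sineSoundData(440.0, 0.5, AUDIO_SAMPLE_RATE.int)
addSound("beep", beep)
addTrack("sfx", level = 0.8'f)

let playback = play("beep", track = "sfx", loop = true, level = 0.5'f)
setLevel(playback, 0.25'f)       # per-playback volume
fadeTo("sfx", 0'f, time = 2.0)   # fade the whole track out over two seconds
stop(playback)
```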
--- a/semicongine/audio/generators.nim Sat Jan 11 12:22:21 2025 +0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,17 +0,0 @@
-import std/math
-
-import ../core
-
-proc sinewave(f: float): proc(x: float): float =
-  proc ret(x: float): float =
-    sin(x * 2 * Pi * f)
-
-  result = ret
-
-proc sineSoundData*(f: float, len: float, rate: int, amplitude = 0.5'f32): SoundData =
-  let dt = 1'f / float(rate)
-  var sine = sinewave(f)
-  for i in 0 ..< int(float(rate) * len):
-    let t = dt * float(i)
-    let value = int16(sine(t) * float(high(int16)) * amplitude)
-    result.add [value, value]
--- a/semicongine/audio/mixer_module.nim Sat Jan 11 12:22:21 2025 +0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,300 +0,0 @@ -import std/os -import std/locks -import std/logging -import std/math -import std/monotimes -import std/strformat -import std/tables -import std/times - -import ../core - -const NBUFFERS = 32 -# it seems that some alsa hardware has a problem with smaller buffers than 512 -when defined(linux): - const BUFFERSAMPLECOUNT = 512 -else: - const BUFFERSAMPLECOUNT = 256 - -when defined(windows): - include ./platform/windows -when defined(linux): - include ./platform/linux - -proc initMixer*(): Mixer = - result = Mixer(tracks: initTable[string, Track](), level: 1'f) - result.tracks[""] = Track(level: 1) - result.lock.initLock() - -# TODO: this should probably be in the load-code-stuff -# proc LoadSound*(mixer: var Mixer, name: string, resource: string) = -# assert not (name in mixer.sounds) -# mixer.sounds[name] = LoadAudio(resource) - -proc addSound*(name: string, sound: SoundData) = - if name in engine().mixer.sounds: - warn "sound with name '", name, "' was already loaded, overwriting" - engine().mixer.sounds[name] = sound - -proc addTrack*(name: string, level: AudioLevel = 1'f) = - if name in engine().mixer.tracks: - warn "track with name '", name, "' was already loaded, overwriting" - engine().mixer.lock.withLock: - engine().mixer.tracks[name] = Track(level: level) - -proc play*( - soundName: string, - track = "", - stopOtherSounds = false, - loop = false, - levelLeft, levelRight: AudioLevel, -): uint64 = - assert track in engine().mixer.tracks, &"Track '{track}' does not exists" - assert soundName in engine().mixer.sounds, soundName & " not loaded" - engine().mixer.lock.withLock: - if stopOtherSounds: - engine().mixer.tracks[track].playing.clear() - engine().mixer.tracks[track].playing[engine().mixer.playbackCounter] = Playback( - sound: engine().mixer.sounds[soundName], - position: 0, - loop: loop, - levelLeft: levelLeft, - levelRight: levelRight, - paused: false, - ) - result = engine().mixer.playbackCounter - inc engine().mixer.playbackCounter - -proc play*( - soundName: string, - track = "", - stopOtherSounds = false, - loop = false, - level: AudioLevel = 1'f, -): uint64 = - play( - soundName = soundName, - track = track, - stopOtherSounds = stopOtherSounds, - loop = loop, - levelLeft = level, - levelRight = level, - ) - -proc stop*() = - engine().mixer.lock.withLock: - for track in engine().mixer.tracks.mvalues: - track.playing.clear() - -proc getLevel*(): AudioLevel = - engine().mixer.level - -proc getLevel*(track: string): AudioLevel = - engine().mixer.tracks[track].level - -proc getLevel*(playbackId: uint64): (AudioLevel, AudioLevel) = - for track in engine().mixer.tracks.mvalues: - if playbackId in track.playing: - return (track.playing[playbackId].levelLeft, track.playing[playbackId].levelRight) - -proc setLevel*(level: AudioLevel) = - engine().mixer.level = level - -proc setLevel*(track: string, level: AudioLevel) = - engine().mixer.lock.withLock: - engine().mixer.tracks[track].level = level - -proc setLevel*(playbackId: uint64, levelLeft, levelRight: AudioLevel) = - engine().mixer.lock.withLock: - for track in engine().mixer.tracks.mvalues: - if playbackId in track.playing: - track.playing[playbackId].levelLeft = levelLeft - track.playing[playbackId].levelRight = levelRight - -proc setLevel*(playbackId: uint64, level: AudioLevel) = - setLevel(playbackId, level, level) - -proc stop*(track: string) = - assert track in engine().mixer.tracks - 
engine().mixer.lock.withLock: - engine().mixer.tracks[track].playing.clear() - -proc stop*(playbackId: uint64) = - engine().mixer.lock.withLock: - for track in engine().mixer.tracks.mvalues: - if playbackId in track.playing: - track.playing.del(playbackId) - break - -proc pause*(value: bool) = - engine().mixer.lock.withLock: - for track in engine().mixer.tracks.mvalues: - for playback in track.playing.mvalues: - playback.paused = value - -proc pause*(track: string, value: bool) = - engine().mixer.lock.withLock: - for playback in engine().mixer.tracks[track].playing.mvalues: - playback.paused = value - -proc pause*(playbackId: uint64, value: bool) = - engine().mixer.lock.withLock: - for track in engine().mixer.tracks.mvalues: - if playbackId in track.playing: - track.playing[playbackId].paused = value - -proc pause*() = - pause(true) - -proc pause*(track: string) = - pause(track, true) - -proc pause*(playbackId: uint64) = - pause(playbackId, true) - -proc unpause*() = - pause(false) - -proc unpause*(track: string) = - pause(track, false) - -proc unpause*(playbackId: uint64) = - pause(playbackId, false) - -proc fadeTo*(track: string, level: AudioLevel, time: float) = - engine().mixer.tracks[track].targetLevel = level - engine().mixer.tracks[track].fadeTime = time - engine().mixer.tracks[track].fadeStep = - level.float - engine().mixer.tracks[track].level.float / time - -proc isPlaying*(): bool = - engine().mixer.lock.withLock: - for track in engine().mixer.tracks.mvalues: - for playback in track.playing.values: - if not playback.paused: - return true - return false - -proc isPlaying*(track: string): bool = - engine().mixer.lock.withLock: - if engine().mixer.tracks.contains(track): - for playback in engine().mixer.tracks[track].playing.values: - if not playback.paused: - return true - return false - -func applyLevel(sample: Sample, levelLeft, levelRight: AudioLevel): Sample = - [int16(float(sample[0]) * levelLeft), int16(float(sample[1]) * levelRight)] - -func clip(value: int32): int16 = - int16(max(min(int32(high(int16)), value), int32(low(int16)))) - -# used for combining sounds -func mix(a, b: Sample): Sample = - [clip(int32(a[0]) + int32(b[0])), clip(int32(a[1]) + int32(b[1]))] - -proc updateSoundBuffer*(mixer: var Mixer) = - let t = getMonoTime() - - let dt = (t - mixer.lastUpdate).inNanoseconds.float64 / 1_000_000_000'f64 - mixer.lastUpdate = t - - # update fadings - for track in mixer.tracks.mvalues: - if track.fadeTime > 0: - track.fadeTime -= dt - track.level = (track.level.float64 + track.fadeStep.float64 * dt).clamp( - AudioLevel.low, AudioLevel.high - ) - if track.fadeTime <= 0: - track.level = track.targetLevel - # mix - var hasData = false - for i in 0 ..< mixer.buffers[mixer.currentBuffer].len: - var mixedSample = [0'i16, 0'i16] - mixer.lock.withLock: - for track in mixer.tracks.mvalues: - var stoppedSounds: seq[uint64] - for (id, playback) in track.playing.mpairs: - if playback.paused: - continue - let sample = applyLevel( - playback.sound[playback.position], - mixer.level * track.level * playback.levelLeft, - mixer.level * track.level * playback.levelRight, - ) - mixedSample = mix(mixedSample, sample) - hasData = true - inc playback.position - if playback.position >= playback.sound.len: - if playback.loop: - playback.position = 0 - else: - stoppedSounds.add id - for id in stoppedSounds: - track.playing.del(id) - mixer.buffers[mixer.currentBuffer][i] = mixedSample - # send data to sound device - if hasData: - mixer.device.WriteSoundData(mixer.currentBuffer) - 
mixer.currentBuffer = (mixer.currentBuffer + 1) mod mixer.buffers.len - -# DSP functions -# TODO: finish implementation, one day - -#[ -# -proc lowPassFilter(data: var SoundData, cutoff: int) = - let alpha = float(cutoff) / AUDIO_SAMPLE_RATE - var value = data[0] - for i in 0 ..< data.len: - value[0] += int16(alpha * float(data[i][0] - value[0])) - value[1] += int16(alpha * float(data[i][1] - value[1])) - data[i] = value - - proc downsample(data: var SoundData, n: int) = - let newLen = (data.len - 1) div n + 1 - for i in 0 ..< newLen: - data[i] = data[i * n] - data.setLen(newLen) - - proc upsample(data: var SoundData, m: int) = - data.setLen(data.len * m) - var i = data.len - 1 - while i < 0: - if i mod m == 0: - data[i] = data[i div m] - else: - data[i] = [0, 0] - i.dec - - proc slowdown(data: var SoundData, m, n: int) = - data.upsample(m) - # TODO - # data.lowPassFilter(m) - data.downsample(n) - - ]# - -proc setupDevice(mixer: var Mixer) = - # call this inside audio thread - var bufferaddresses: seq[ptr SoundData] - for i in 0 ..< NBUFFERS: - mixer.buffers.add newSeq[Sample](BUFFERSAMPLECOUNT) - for i in 0 ..< mixer.buffers.len: - bufferaddresses.add (addr mixer.buffers[i]) - mixer.device = OpenSoundDevice(AUDIO_SAMPLE_RATE, bufferaddresses) - -proc destroy(mixer: var Mixer) = - mixer.lock.deinitLock() - mixer.device.CloseSoundDevice() - -proc audioWorker*(mixer: ptr Mixer) {.thread.} = - mixer[].setupDevice() - onThreadDestruction( - proc() = - mixer[].lock.withLock(mixer[].destroy()) - freeShared(mixer) - ) - while true: - mixer[].updateSoundBuffer()
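The deleted module's mixing core (identical to the version merged into `audio.nim` above) accumulates stereo samples in `int32` and clamps back to `int16`, so overlapping sounds saturate instead of wrapping around. A self-contained sketch of that arithmetic:

```nim
type Sample = array[2, int16]  # mirrors the engine's stereo sample type

func clip(value: int32): int16 =
  int16(max(min(int32(high(int16)), value), int32(low(int16))))

func mix(a, b: Sample): Sample =
  [clip(int32(a[0]) + int32(b[0])), clip(int32(a[1]) + int32(b[1]))]

# 30000 + 10000 and -20000 + -20000 both overflow int16 and get clamped:
assert mix([30000'i16, -20000'i16], [10000'i16, -20000'i16]) == [high(int16), low(int16)]
```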
--- a/semicongine/audio/platform/linux.nim Sat Jan 11 12:22:21 2025 +0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,87 +0,0 @@ -{.pragma: alsafunc, importc, cdecl, dynlib: "libasound.so.2".} -proc snd_pcm_open*( - pcm_ref: ptr snd_pcm_p, name: cstring, streamMode: StreamMode, openmode: OpenMode -): cint {.alsafunc.} - -proc snd_pcm_close*(pcm: snd_pcm_p): cint {.alsafunc.} -proc snd_pcm_hw_params_malloc*( - hw_params_ptr: ptr snd_pcm_hw_params_p -): cint {.alsafunc.} - -proc snd_pcm_hw_params_free*(hw_params: snd_pcm_hw_params_p) {.alsafunc.} -proc snd_pcm_hw_params_any*( - pcm: snd_pcm_p, params: snd_pcm_hw_params_p -): cint {.alsafunc.} - -proc snd_pcm_hw_params_set_access*( - pcm: snd_pcm_p, params: snd_pcm_hw_params_p, mode: AccessMode -): cint {.alsafunc.} - -proc snd_pcm_hw_params_set_format*( - pcm: snd_pcm_p, params: snd_pcm_hw_params_p, format: PCMFormat -): cint {.alsafunc.} - -proc snd_pcm_hw_params_set_channels*( - pcm: snd_pcm_p, params: snd_pcm_hw_params_p, val: cuint -): cint {.alsafunc.} - -proc snd_pcm_hw_params_set_buffer_size*( - pcm: snd_pcm_p, params: snd_pcm_hw_params_p, size: snd_pcm_uframes_t -): cint {.alsafunc.} - -proc snd_pcm_hw_params_set_rate*( - pcm: snd_pcm_p, params: snd_pcm_hw_params_p, val: cuint, dir: cint -): cint {.alsafunc.} - -proc snd_pcm_hw_params*(pcm: snd_pcm_p, params: snd_pcm_hw_params_p): cint {.alsafunc.} -proc snd_pcm_writei*( - pcm: snd_pcm_p, buffer: pointer, size: snd_pcm_uframes_t -): snd_pcm_sframes_t {.alsafunc.} - -proc snd_pcm_recover*(pcm: snd_pcm_p, err: cint, silent: cint): cint {.alsafunc.} - -template checkAlsaResult(call: untyped) = - let value = call - if value < 0: - raise - newException(Exception, "Alsa error: " & astToStr(call) & " returned " & $value) - -# required for engine: - -proc OpenSoundDevice*( - sampleRate: uint32, buffers: seq[ptr SoundData] -): NativeSoundDevice = - var hw_params: snd_pcm_hw_params_p = nil - checkAlsaResult snd_pcm_open( - addr result.handle, "default", SND_PCM_STREAM_PLAYBACK, SND_PCM_BLOCK - ) - - # hw parameters, quiet a bit of hardcoding here - checkAlsaResult snd_pcm_hw_params_malloc(addr hw_params) - checkAlsaResult snd_pcm_hw_params_any(result.handle, hw_params) - checkAlsaResult snd_pcm_hw_params_set_access( - result.handle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED - ) - checkAlsaResult snd_pcm_hw_params_set_format( - result.handle, hw_params, SND_PCM_FORMAT_S16_LE - ) - checkAlsaResult snd_pcm_hw_params_set_rate(result.handle, hw_params, sampleRate, 0) - checkAlsaResult snd_pcm_hw_params_set_channels(result.handle, hw_params, 2) - checkAlsaResult snd_pcm_hw_params_set_buffer_size( - result.handle, hw_params, snd_pcm_uframes_t(buffers[0][].len) - ) - checkAlsaResult snd_pcm_hw_params(result.handle, hw_params) - snd_pcm_hw_params_free(hw_params) - result.buffers = buffers - -proc WriteSoundData*(soundDevice: NativeSoundDevice, buffer: int) = - var ret = snd_pcm_writei( - soundDevice.handle, - addr soundDevice.buffers[buffer][][0], - snd_pcm_uframes_t(soundDevice.buffers[buffer][].len), - ) - if ret < 0: - checkAlsaResult snd_pcm_recover(soundDevice.handle, cint(ret), 0) - -proc CloseSoundDevice*(soundDevice: NativeSoundDevice) = - discard snd_pcm_close(soundDevice.handle)
--- a/semicongine/audio/platform/windows.nim Sat Jan 11 12:22:21 2025 +0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,59 +0,0 @@ -import std/os - -import ../../thirdparty/winim/winim/inc/[mmsystem, windef] - -template CheckWinMMResult*(call: untyped) = - let value = call - if value < 0: - raise newException( - Exception, "Windows multimedia error: " & astToStr(call) & " returned " & $value - ) - -proc OpenSoundDevice*( - sampleRate: uint32, buffers: seq[ptr SoundData] -): NativeSoundDevice = - var format = WAVEFORMATEX( - wFormatTag: WAVE_FORMAT_PCM, - nChannels: 2, - nSamplesPerSec: DWORD(sampleRate), - nAvgBytesPerSec: DWORD(sampleRate) * 4, - nBlockAlign: 4, - wBitsPerSample: 16, - cbSize: 0, - ) - CheckWinMMResult waveOutOpen( - addr result.handle, - WAVE_MAPPER, - addr format, - DWORD_PTR(0), - DWORD_PTR(0), - CALLBACK_NULL, - ) - - for i in 0 ..< buffers.len: - result.buffers.add WAVEHDR( - lpData: cast[ptr CHAR](addr buffers[i][][0]), - dwBufferLength: DWORD(buffers[i][].len * sizeof(Sample)), - dwLoops: 1, - ) - for i in 0 ..< result.buffers.len: - CheckWinMMResult waveOutPrepareHeader( - result.handle, addr result.buffers[i], UINT(sizeof(WAVEHDR)) - ) - CheckWinMMResult waveOutWrite( - result.handle, addr result.buffers[i], UINT(sizeof(WAVEHDR)) - ) - -proc WriteSoundData*(soundDevice: var NativeSoundDevice, buffer: int) = - while (soundDevice.buffers[buffer].dwFlags and WHDR_DONE) == 0: - sleep(1) - CheckWinMMResult waveOutWrite( - soundDevice.handle, addr soundDevice.buffers[buffer], UINT(sizeof(WAVEHDR)) - ) - -proc CloseSoundDevice*(soundDevice: var NativeSoundDevice) = - for i in 0 ..< soundDevice.buffers.len: - discard waveOutUnprepareHeader( - soundDevice.handle, addr soundDevice.buffers[i], UINT(sizeof(WAVEHDR)) - ) - waveOutClose(soundDevice.handle)
--- a/semicongine/audio/resources.nim Sat Jan 11 12:22:21 2025 +0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,114 +0,0 @@ -import std/endians -import std/os -import std/streams -import std/strformat - -import ../core - -type - Encoding {.size: sizeof(uint32).} = enum - # Unspecified = 0 - # Uint8Ulaw = 1 - # Int8 = 2 - Int16 = 3 - # Int24 = 4 - # Int32 = 5 - # Float32 = 6 - # Float64 = 7 - - AuHeader = object - magicNumber: uint32 - dataOffset: uint32 - dataSize: uint32 - encoding: Encoding - sampleRate: uint32 - channels: uint32 - -proc readSample(stream: Stream, encoding: Encoding, channels: int): Sample = - result[0] = stream.readint16() - swapEndian16(addr result[0], addr result[0]) - - if channels == 2: - result[1] = stream.readint16() - swapEndian16(addr result[1], addr result[1]) - else: - result[1] = result[0] - -# https://en.wikipedia.org/wiki/Au_file_format -proc readAU*(stream: Stream): SoundData = - var header: AuHeader - - for name, value in fieldPairs(header): - var bytes: array[4, uint8] - stream.read(bytes) - swap(bytes[0], bytes[3]) - swap(bytes[1], bytes[2]) - value = cast[typeof(value)](bytes) - - assert header.magicNumber == 0x2e736e64 - if header.sampleRate != AUDIO_SAMPLE_RATE: - raise newException( - Exception, - &"Only support sample rate of {AUDIO_SAMPLE_RATE} Hz but got {header.sampleRate} Hz, please resample (e.g. ffmpeg -i <infile> -ar {AUDIO_SAMPLE_RATE} <outfile>)", - ) - if not (header.channels in [1'u32, 2'u32]): - raise newException( - Exception, - "Only support mono and stereo audio at the moment (1 or 2 channels), but found " & - $header.channels, - ) - - var annotation: string - stream.read(annotation) - - stream.setPosition(int(header.dataOffset)) - while not stream.atEnd(): - result.add stream.readSample(header.encoding, int(header.channels)) - -{.compile: currentSourcePath.parentDir().parentDir() & "/thirdparty/stb/stb_vorbis.c".} - -proc stb_vorbis_decode_memory( - mem: pointer, - len: cint, - channels: ptr cint, - sample_rate: ptr cint, - output: ptr ptr cshort, -): cint {.importc.} - -proc readVorbis*(stream: Stream): SoundData = - var - data = stream.readAll() - channels: cint - sampleRate: cint - output: ptr cshort - - var nSamples = stb_vorbis_decode_memory( - addr data[0], cint(data.len), addr channels, addr sampleRate, addr output - ) - - if nSamples < 0: - raise newException( - Exception, &"Unable to read ogg/vorbis sound file, error code: {nSamples}" - ) - if sampleRate != AUDIO_SAMPLE_RATE: - raise newException( - Exception, - &"Only support sample rate of {AUDIO_SAMPLE_RATE} Hz but got {sampleRate} Hz, please resample (e.g. ffmpeg -i <infile> -acodec libvorbis -ar {AUDIO_SAMPLE_RATE} <outfile>)", - ) - - if channels == 2: - result.setLen(int(nSamples)) - copyMem(addr result[0], output, nSamples * sizeof(Sample)) - nativeFree(output) - elif channels == 1: - for i in 0 ..< nSamples: - let value = cast[ptr UncheckedArray[int16]](output)[i] - result.add [value, value] - nativeFree(output) - else: - nativeFree(output) - raise newException( - Exception, - "Only support mono and stereo audio at the moment (1 or 2 channels), but found " & - $channels, - )
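`readAU` (moved into `audio.nim` above) reads each 32-bit header field as four bytes and swaps them, because `.au` headers are big-endian while the decoding assumes a little-endian host. A tiny self-contained sketch of that step; the stream content is just the `.snd` magic number:

```nim
import std/streams

proc readBigEndian32(stream: Stream): uint32 =
  # same byte-swap trick as readAU: reverse the four bytes, then reinterpret
  var bytes: array[4, uint8]
  stream.read(bytes)
  swap(bytes[0], bytes[3])
  swap(bytes[1], bytes[2])
  result = cast[uint32](bytes)

let s = newStringStream("\x2e\x73\x6e\x64")   # ".snd"
assert s.readBigEndian32() == 0x2e736e64'u32  # the Au magic number
```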
--- a/semicongine/background_loader.nim Sat Jan 11 12:22:21 2025 +0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,46 +0,0 @@ -import std/syncio -import std/tables - -import ./core - -proc loader[T](args: LoaderThreadArgs[T]) {.thread.} = - while true: - let (path, package) = args[0][].recv() - try: - args[1][].send( - LoaderResponse[T](path: path, package: package, data: args[2](path, package)) - ) - except Exception as e: - args[1][].send(LoaderResponse[T](path: path, package: package, error: e.msg)) - -proc fetchAll*(ld: var BackgroundLoader) = - var (hasData, response) = ld.responseCn.tryRecv() - while hasData: - ld.responseTable[response.package & ":" & response.path] = response - (hasData, response) = ld.responseCn.tryRecv() - -proc requestLoading*(ld: var BackgroundLoader, path, package: string) = - ld.loadRequestCn.send((path, package)) - -proc isLoaded*(ld: var BackgroundLoader, path, package: string): bool = - fetchAll(ld) - (package & ":" & path) in ld.responseTable - -proc getLoadedData*[T](ld: var BackgroundLoader[T], path, package: string): T = - var item: LoaderResponse[T] - doAssert ld.responseTable.pop(package & ":" & path, item) - if item.error != "": - raise newException(Exception, item.error) - result = item.data - -proc initBackgroundLoader*[T]( - loadFn: proc(path, package: string): T {.gcsafe.} -): ptr BackgroundLoader[T] = - result = createShared(BackgroundLoader[T]) - open(result.loadRequestCn) - open(result.responseCn) - createThread[LoaderThreadArgs[T]]( - result.worker, - loader[T], - (addr result.loadRequestCn, addr result.responseCn, loadFn), - )
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/semicongine/background_loaders.nim Sat Jan 11 14:04:39 2025 +0700 @@ -0,0 +1,46 @@ +import std/syncio +import std/tables + +import ./core + +proc loader[T](args: LoaderThreadArgs[T]) {.thread.} = + while true: + let (path, package) = args[0][].recv() + try: + args[1][].send( + LoaderResponse[T](path: path, package: package, data: args[2](path, package)) + ) + except Exception as e: + args[1][].send(LoaderResponse[T](path: path, package: package, error: e.msg)) + +proc fetchAll*(ld: var BackgroundLoader) = + var (hasData, response) = ld.responseCn.tryRecv() + while hasData: + ld.responseTable[response.package & ":" & response.path] = response + (hasData, response) = ld.responseCn.tryRecv() + +proc requestLoading*(ld: var BackgroundLoader, path, package: string) = + ld.loadRequestCn.send((path, package)) + +proc isLoaded*(ld: var BackgroundLoader, path, package: string): bool = + fetchAll(ld) + (package & ":" & path) in ld.responseTable + +proc getLoadedData*[T](ld: var BackgroundLoader[T], path, package: string): T = + var item: LoaderResponse[T] + doAssert ld.responseTable.pop(package & ":" & path, item) + if item.error != "": + raise newException(Exception, item.error) + result = item.data + +proc initBackgroundLoader*[T]( + loadFn: proc(path, package: string): T {.gcsafe.} +): ptr BackgroundLoader[T] = + result = createShared(BackgroundLoader[T]) + open(result.loadRequestCn) + open(result.responseCn) + createThread[LoaderThreadArgs[T]]( + result.worker, + loader[T], + (addr result.loadRequestCn, addr result.responseCn, loadFn), + )
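A hedged usage sketch of the renamed background loader, assuming a `gcsafe` load proc such as `loadBytes` from `loaders.nim`; the resource path and package strings are placeholders:

```nim
# Spawns a worker thread that answers load requests over channels.
let rawLoader = initBackgroundLoader(loadBytes)

rawLoader[].requestLoading("sounds/door.ogg", "default")  # hypothetical resource

# poll from the main loop; getLoadedData re-raises any error from the worker
if rawLoader[].isLoaded("sounds/door.ogg", "default"):
  let bytes = rawLoader[].getLoadedData("sounds/door.ogg", "default")
```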
--- a/semicongine/core/types.nim Sat Jan 11 12:22:21 2025 +0700 +++ b/semicongine/core/types.nim Sat Jan 11 14:04:39 2025 +0700 @@ -479,6 +479,48 @@ # === steam === SteamUserStatsRef* = ptr object + # === glTF === + GltfNode* = object + children*: seq[int] + mesh*: int = -1 + transform*: Mat4 = Unit4 + + GltfData*[TMesh, TMaterial] = object + scenes*: seq[seq[int]] # each scene has a seq of node indices + nodes*: seq[GltfNode] # each node has a seq of mesh indices + meshes*: seq[seq[(TMesh, VkPrimitiveTopology)]] + materials*: seq[TMaterial] + textures*: seq[Image[BGRA]] + + MaterialAttributeNames* = object # pbr + baseColorTexture*: string + baseColorTextureUv*: string + baseColorFactor*: string + metallicRoughnessTexture*: string + metallicRoughnessTextureUv*: string + metallicFactor*: string + roughnessFactor*: string + + # other + normalTexture*: string + normalTextureUv*: string + occlusionTexture*: string + occlusionTextureUv*: string + emissiveTexture*: string + emissiveTextureUv*: string + emissiveFactor*: string + + MeshAttributeNames* = object + POSITION*: string + NORMAL*: string + TANGENT*: string + TEXCOORD*: seq[string] + COLOR*: seq[string] + JOINTS*: seq[string] + WEIGHTS*: seq[string] + indices*: string + material*: string + # === global engine object === EngineObj = object initialized*: bool @@ -500,6 +542,10 @@ Engine* = ref EngineObj +# fixed value for non-array images +template nLayers*(image: Image): untyped = + 1'u32 + # prevent object copies proc `=copy`(dest: var VulkanObject, source: VulkanObject) {.error.} @@ -522,3 +568,6 @@ proc `=copy`[MaxGlyphs: static int]( dest: var TextBuffer[MaxGlyphs], source: TextBuffer[MaxGlyphs] ) {.error.} + +proc `=copy`(dest: var GltfNode, source: GltfNode) {.error.} +proc `=copy`[S, T](dest: var GltfData[S, T], source: GltfData[S, T]) {.error.}
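With `GltfNode`/`GltfData` now living in `core/types.nim`, scene traversal is a matter of following node indices and accumulating transforms. A rough sketch, assuming the `Mat4` `*` operator and `Unit4` from core; the actual draw call is left as a placeholder:

```nim
proc visit[TMesh, TMaterial](
    gltf: GltfData[TMesh, TMaterial], nodeIx: int, parent: Mat4
) =
  # world transform = parent transform * node-local transform
  let transform = parent * gltf.nodes[nodeIx].transform
  let meshIx = gltf.nodes[nodeIx].mesh
  if meshIx >= 0:
    for primitive in 0 ..< gltf.meshes[meshIx].len:
      discard # draw gltf.meshes[meshIx][primitive] with `transform` here (placeholder)
  for child in gltf.nodes[nodeIx].children:
    visit(gltf, child, transform)

# each entry in gltf.scenes is a list of root node indices, e.g.:
# for root in gltf.scenes[0]: visit(gltf, root, Unit4)
```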
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/semicongine/font.nim Sat Jan 11 14:04:39 2025 +0700 @@ -0,0 +1,252 @@ +import std/os +import std/strutils +import std/strformat +import std/sequtils +import std/unicode +import std/streams +import std/logging +import std/tables + +import ./core +import ./resources +import ./rendering/renderer +import ./rendering/memory +import ./contrib/algorithms/texture_packing + +{.emit: "#define STBTT_STATIC".} +{.emit: "#define STB_TRUETYPE_IMPLEMENTATION".} +{. + emit: + "#include \"" & currentSourcePath.parentDir() & "/thirdparty/stb/stb_truetype.h\"" +.} + +const ASCII_CHARSET = PrintableChars.toSeq.toRunes + +type stbtt_fontinfo {.importc, incompleteStruct.} = object + +proc stbtt_InitFont( + info: ptr stbtt_fontinfo, data: ptr char, offset: cint +): cint {.importc, nodecl.} + +proc stbtt_ScaleForPixelHeight( + info: ptr stbtt_fontinfo, pixels: cfloat +): cfloat {.importc, nodecl.} + +proc stbtt_GetCodepointBitmap( + info: ptr stbtt_fontinfo, + scale_x: cfloat, + scale_y: cfloat, + codepoint: cint, + width, height, xoff, yoff: ptr cint, +): cstring {.importc, nodecl.} + +proc stbtt_GetCodepointBox( + info: ptr stbtt_fontinfo, codepoint: cint, x0, y0, x1, y1: ptr cint +): cint {.importc, nodecl.} + +proc stbtt_GetCodepointHMetrics( + info: ptr stbtt_fontinfo, codepoint: cint, advance, leftBearing: ptr cint +) {.importc, nodecl.} + +proc stbtt_GetCodepointKernAdvance( + info: ptr stbtt_fontinfo, ch1, ch2: cint +): cint {.importc, nodecl.} + +proc stbtt_FindGlyphIndex( + info: ptr stbtt_fontinfo, codepoint: cint +): cint {.importc, nodecl.} + +proc stbtt_GetFontVMetrics( + info: ptr stbtt_fontinfo, ascent, descent, lineGap: ptr cint +) {.importc, nodecl.} + +proc readTrueType[N: static int]( + stream: Stream, name: string, codePoints: seq[Rune], lineHeightPixels: float32 +): Font[N] = + assert codePoints.len <= N, + "asked for " & $codePoints.len & " glyphs but shader is only configured for " & $N + + result = Font[N]() + + var + indata = stream.readAll() + fi: stbtt_fontinfo + if stbtt_InitFont(addr fi, indata.ToCPointer, 0) == 0: + raise newException(Exception, "An error occured while loading font file") + + let + glyph2bitmapScale = + float32(stbtt_ScaleForPixelHeight(addr fi, cfloat(lineHeightPixels))) + glyph2QuadScale = glyph2bitmapScale / lineHeightPixels + + # ensure all codepoints are available in the font + for codePoint in codePoints: + if stbtt_FindGlyphIndex(addr fi, cint(codePoint)) == 0: + warn &"Loading font {name}: Codepoint '{codePoint}' ({cint(codePoint)}) has no glyph" + + var + offsetY: Table[Rune, cint] + offsetX: Table[Rune, cint] + bitmaps: seq[Image[Gray]] + + # render all glyphs to bitmaps and store quad geometry info + for codePoint in codePoints: + offsetX[codePoint] = 0 + offsetY[codePoint] = 0 + var width, height: cint + let data = stbtt_GetCodepointBitmap( + addr fi, + glyph2bitmapScale, + glyph2bitmapScale, + cint(codePoint), + addr width, + addr height, + addr (offsetX[codePoint]), + addr (offsetY[codePoint]), + ) + + if width > 0 and height > 0: + var bitmap = newSeq[Gray](width * height) + for i in 0 ..< width * height: + bitmap[i] = vec1u8(data[i].uint8) + bitmaps.add Image[Gray](width: width.uint32, height: height.uint32, data: bitmap) + else: + bitmaps.add Image[Gray](width: 1, height: 1, data: @[vec1u8(0)]) + + nativeFree(data) + + # generate glyph atlas from bitmaps + let packed = pack(bitmaps) + result.descriptorSet.data.fontAtlas = packed.atlas + + # generate quad-information for use in shader + for i in 0 
..< codePoints.len: + let codePoint = codePoints[i] + var advanceUnscaled, leftBearingUnscaled: cint + # is in glyph-space, needs to be scaled to pixel-space + stbtt_GetCodepointHMetrics( + addr fi, cint(codePoint), addr advanceUnscaled, addr leftBearingUnscaled + ) + result.leftBearing[codePoint] = leftBearingUnscaled.float32 * glyph2QuadScale + result.advance[codePoint] = advanceUnscaled.float32 * glyph2QuadScale + + let + atlasW = float32(result.descriptorSet.data.fontAtlas.width) + atlasH = float32(result.descriptorSet.data.fontAtlas.height) + uv = vec2(packed.coords[i].x, packed.coords[i].y) + bitmapW = float32(bitmaps[i].width) + bitmapH = float32(bitmaps[i].height) + # divide by lineHeightPixels to get from pixel-space to quad-geometry-space + left = + result.leftBearing[codePoint] + offsetX[codePoint].float32 / lineHeightPixels + right = left + bitmapW / lineHeightPixels + top = -offsetY[codePoint].float32 / lineHeightPixels + bottom = top - bitmapH / lineHeightPixels + + template glyphquads(): untyped = + result.descriptorSet.data.glyphquads.data + + glyphquads.pos[i] = vec4(left, bottom, right, top) + glyphquads.uv[i] = vec4( + (uv.x + 0.5) / atlasW, # left + (uv.y + bitmapH - 0.5) / atlasH, # bottom + (uv.x + bitmapW - 0.5) / atlasW, # right + (uv.y + 0.5) / atlasH, # top + ) + if i == 0: + result.fallbackCharacter = codePoint + result.descriptorGlyphIndex[codePoint] = i.uint16 + result.descriptorGlyphIndexRev[i.uint16] = codePoint # only used for debugging atm + + # kerning + for codePointAfter in codePoints: + result.kerning[(codePoint, codePointAfter)] = + float32( + stbtt_GetCodepointKernAdvance(addr fi, cint(codePoint), cint(codePointAfter)) + ) * glyph2QuadScale + + # line spacing + var ascent, descent, lineGap: cint + stbtt_GetFontVMetrics(addr fi, addr ascent, addr descent, addr lineGap) + result.lineAdvance = float32(ascent - descent + lineGap) * glyph2QuadScale + result.lineHeight = float32(ascent - descent) * glyph2QuadScale # should be 1 + result.ascent = float32(ascent) * glyph2QuadScale + result.descent = float32(descent) * glyph2QuadScale + + var x0, y0, x1, y1: cint + discard + stbtt_GetCodepointBox(addr fi, cint(Rune('x')), addr x0, addr y0, addr x1, addr y1) + result.xHeight = float32(y1 - y0) * glyph2QuadScale + +proc loadFont*[N: static int]( + path: string, + lineHeightPixels = 80'f32, + additional_codepoints: openArray[Rune] = [], + charset = ASCII_CHARSET, + package = DEFAULT_PACKAGE, +): Font[N] = + readTrueType[N]( + loadResource_intern(path, package = package), + path.splitFile().name, + charset & additional_codepoints.toSeq, + lineHeightPixels, + ) + +proc upload*(font: Font, renderdata: var RenderData) = + assert font.descriptorSet.vk.allIt(not it.Valid), "Font was alread uploaded" + assignBuffers(renderdata, font.descriptorSet) + uploadImages(renderdata, font.descriptorSet) + +proc addToPipeline*(font: Font, renderdata: RenderData, pipeline: Pipeline) = + initDescriptorSet(renderdata, pipeline.layout(3), font.descriptorSet) + +proc bindTo*(font: Font, pipeline: Pipeline, commandbuffer: VkCommandBuffer) = + bindDescriptorSet(commandbuffer, font.descriptorSet, 3, pipeline) + +#[ +# needs to be adjusted to work correctly with new metrics code in text.nim +func wordWrapped*(text: seq[Rune], font: FontObj, maxWidth: float32): seq[Rune] = + var remaining: seq[seq[Rune]] = @[@[]] + for c in text: + if c == SPACE: + remaining.add newSeq[Rune]() + else: + remaining[^1].add c + remaining.reverse() + + var currentLine: seq[Rune] + + while remaining.len > 0: + 
var currentWord = remaining.pop() + assert not (SPACE in currentWord) + + if currentWord.len == 0: + currentLine.add SPACE + else: + assert currentWord[^1] != SPACE + # if this is the first word of the line and it is too long we need to + # split by character + if currentLine.len == 0 and (SPACE & currentWord).textWidth(font) > maxWidth: + var subWord = @[currentWord[0]] + for c in currentWord[1 .. ^1]: + if (subWord & c).textWidth(font) > maxWidth: + break + subWord.add c + result.add subWord & NEWLINE + remaining.add currentWord[subWord.len .. ^1] + # process rest of the word in next iteration + else: + if (currentLine & SPACE & currentWord).textWidth(font) <= maxWidth: + if currentLine.len == 0: + currentLine = currentWord + else: + currentLine = currentLine & SPACE & currentWord + else: + result.add currentLine & NEWLINE + remaining.add currentWord + currentLine = @[] + if currentLine.len > 0 and currentLine != @[SPACE]: + result.add currentLine + + return result + ]#
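A hedged sketch of using the relocated font module; the glyph capacity (`200`), the font path and the `renderdata`/`pipeline`/`commandbuffer` values are placeholders from a typical render setup, not part of this changeset:

```nim
var font = loadFont[200]("MyFont.ttf", lineHeightPixels = 64'f32)

font.upload(renderdata)                   # create buffers and upload the glyph atlas
font.addToPipeline(renderdata, pipeline)  # allocate its descriptor set (set index 3)

# each frame, before issuing text draws:
font.bindTo(pipeline, commandbuffer)
```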
--- a/semicongine/gltf.nim Sat Jan 11 12:22:21 2025 +0700 +++ b/semicongine/gltf.nim Sat Jan 11 14:04:39 2025 +0700 @@ -9,18 +9,6 @@ import ./image type - GltfNode* = object - children*: seq[int] - mesh*: int = -1 - transform*: Mat4 = Unit4 - - GltfData*[TMesh, TMaterial] = object - scenes*: seq[seq[int]] # each scene has a seq of node indices - nodes*: seq[GltfNode] # each node has a seq of mesh indices - meshes*: seq[seq[(TMesh, VkPrimitiveTopology)]] - materials*: seq[TMaterial] - textures*: seq[Image[BGRA]] - glTFHeader = object magic: uint32 version: uint32 @@ -30,39 +18,8 @@ structuredContent: JsonNode binaryBufferData: seq[uint8] - MaterialAttributeNames* = object # pbr - baseColorTexture*: string - baseColorTextureUv*: string - baseColorFactor*: string - metallicRoughnessTexture*: string - metallicRoughnessTextureUv*: string - metallicFactor*: string - roughnessFactor*: string - - # other - normalTexture*: string - normalTextureUv*: string - occlusionTexture*: string - occlusionTextureUv*: string - emissiveTexture*: string - emissiveTextureUv*: string - emissiveFactor*: string - - MeshAttributeNames* = object - POSITION*: string - NORMAL*: string - TANGENT*: string - TEXCOORD*: seq[string] - COLOR*: seq[string] - JOINTS*: seq[string] - WEIGHTS*: seq[string] - indices*: string - material*: string - -proc `=copy`(dest: var GltfNode, source: GltfNode) {.error.} proc `=copy`(dest: var glTFHeader, source: glTFHeader) {.error.} proc `=copy`(dest: var glTFData, source: glTFData) {.error.} -proc `=copy`[S, T](dest: var GltfData[S, T], source: GltfData[S, T]) {.error.} const HEADER_MAGIC = 0x46546C67
--- a/semicongine/image.nim Sat Jan 11 12:22:21 2025 +0700
+++ b/semicongine/image.nim Sat Jan 11 14:04:39 2025 +0700
@@ -18,9 +18,6 @@
   desired_channels: cint,
 ): ptr uint8 {.importc, nodecl.}
 
-template nLayers*(image: Image): untyped =
-  1'u32
-
 func `$`*[S, IsArray](img: ImageObject[S, IsArray]): string =
   let pixelTypeName = S.name
   if IsArray == false:
--- a/semicongine/loaders.nim Sat Jan 11 12:22:21 2025 +0700
+++ b/semicongine/loaders.nim Sat Jan 11 14:04:39 2025 +0700
@@ -5,13 +5,12 @@
 import std/streams
 
 import ./audio
-import ./background_loader
+import ./background_loaders
 import ./core
 import ./gltf
 import ./image
 import ./resources
 import ./thirdparty/parsetoml
-export parsetoml
 
 proc loadBytes*(path, package: string): seq[byte] {.gcsafe.} =
   cast[seq[byte]](toSeq(path.loadResource_intern(package = package).readAll()))
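Since `loaders.nim` no longer re-exports `parsetoml`, code that reached the TOML API through this module now needs its own import. A sketch, assuming the vendored parsetoml's `parseString`/`getBool` API and with the relative import path adjusted to the calling module's location:

```nim
import ./semicongine/thirdparty/parsetoml  # adjust path relative to your module

let config = parsetoml.parseString("vsync = true")
echo config["vsync"].getBool()
```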
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/semicongine/platform/linux/audio.nim Sat Jan 11 14:04:39 2025 +0700 @@ -0,0 +1,87 @@ +{.pragma: alsafunc, importc, cdecl, dynlib: "libasound.so.2".} +proc snd_pcm_open*( + pcm_ref: ptr snd_pcm_p, name: cstring, streamMode: StreamMode, openmode: OpenMode +): cint {.alsafunc.} + +proc snd_pcm_close*(pcm: snd_pcm_p): cint {.alsafunc.} +proc snd_pcm_hw_params_malloc*( + hw_params_ptr: ptr snd_pcm_hw_params_p +): cint {.alsafunc.} + +proc snd_pcm_hw_params_free*(hw_params: snd_pcm_hw_params_p) {.alsafunc.} +proc snd_pcm_hw_params_any*( + pcm: snd_pcm_p, params: snd_pcm_hw_params_p +): cint {.alsafunc.} + +proc snd_pcm_hw_params_set_access*( + pcm: snd_pcm_p, params: snd_pcm_hw_params_p, mode: AccessMode +): cint {.alsafunc.} + +proc snd_pcm_hw_params_set_format*( + pcm: snd_pcm_p, params: snd_pcm_hw_params_p, format: PCMFormat +): cint {.alsafunc.} + +proc snd_pcm_hw_params_set_channels*( + pcm: snd_pcm_p, params: snd_pcm_hw_params_p, val: cuint +): cint {.alsafunc.} + +proc snd_pcm_hw_params_set_buffer_size*( + pcm: snd_pcm_p, params: snd_pcm_hw_params_p, size: snd_pcm_uframes_t +): cint {.alsafunc.} + +proc snd_pcm_hw_params_set_rate*( + pcm: snd_pcm_p, params: snd_pcm_hw_params_p, val: cuint, dir: cint +): cint {.alsafunc.} + +proc snd_pcm_hw_params*(pcm: snd_pcm_p, params: snd_pcm_hw_params_p): cint {.alsafunc.} +proc snd_pcm_writei*( + pcm: snd_pcm_p, buffer: pointer, size: snd_pcm_uframes_t +): snd_pcm_sframes_t {.alsafunc.} + +proc snd_pcm_recover*(pcm: snd_pcm_p, err: cint, silent: cint): cint {.alsafunc.} + +template checkAlsaResult(call: untyped) = + let value = call + if value < 0: + raise + newException(Exception, "Alsa error: " & astToStr(call) & " returned " & $value) + +# required for engine: + +proc OpenSoundDevice*( + sampleRate: uint32, buffers: seq[ptr SoundData] +): NativeSoundDevice = + var hw_params: snd_pcm_hw_params_p = nil + checkAlsaResult snd_pcm_open( + addr result.handle, "default", SND_PCM_STREAM_PLAYBACK, SND_PCM_BLOCK + ) + + # hw parameters, quiet a bit of hardcoding here + checkAlsaResult snd_pcm_hw_params_malloc(addr hw_params) + checkAlsaResult snd_pcm_hw_params_any(result.handle, hw_params) + checkAlsaResult snd_pcm_hw_params_set_access( + result.handle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED + ) + checkAlsaResult snd_pcm_hw_params_set_format( + result.handle, hw_params, SND_PCM_FORMAT_S16_LE + ) + checkAlsaResult snd_pcm_hw_params_set_rate(result.handle, hw_params, sampleRate, 0) + checkAlsaResult snd_pcm_hw_params_set_channels(result.handle, hw_params, 2) + checkAlsaResult snd_pcm_hw_params_set_buffer_size( + result.handle, hw_params, snd_pcm_uframes_t(buffers[0][].len) + ) + checkAlsaResult snd_pcm_hw_params(result.handle, hw_params) + snd_pcm_hw_params_free(hw_params) + result.buffers = buffers + +proc WriteSoundData*(soundDevice: NativeSoundDevice, buffer: int) = + var ret = snd_pcm_writei( + soundDevice.handle, + addr soundDevice.buffers[buffer][][0], + snd_pcm_uframes_t(soundDevice.buffers[buffer][].len), + ) + if ret < 0: + checkAlsaResult snd_pcm_recover(soundDevice.handle, cint(ret), 0) + +proc CloseSoundDevice*(soundDevice: NativeSoundDevice) = + discard snd_pcm_close(soundDevice.handle)
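For context on the buffer sizing: each `snd_pcm_writei` call above pushes one engine buffer of `BUFFERSAMPLECOUNT` frames (512 on Linux, per the comment in `audio.nim`). A quick illustrative calculation, with a made-up 44100 Hz rate standing in for `AUDIO_SAMPLE_RATE`:

```nim
const bufferFrames = 512                # BUFFERSAMPLECOUNT on Linux
let sampleRateHz = 44_100.0             # illustrative only; the engine uses AUDIO_SAMPLE_RATE
echo bufferFrames.float / sampleRateHz  # ≈ 0.0116 s of audio per write
```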
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/semicongine/platform/windows/audio.nim Sat Jan 11 14:04:39 2025 +0700 @@ -0,0 +1,59 @@ +import std/os + +import ../../thirdparty/winim/winim/inc/[mmsystem, windef] + +template CheckWinMMResult*(call: untyped) = + let value = call + if value < 0: + raise newException( + Exception, "Windows multimedia error: " & astToStr(call) & " returned " & $value + ) + +proc OpenSoundDevice*( + sampleRate: uint32, buffers: seq[ptr SoundData] +): NativeSoundDevice = + var format = WAVEFORMATEX( + wFormatTag: WAVE_FORMAT_PCM, + nChannels: 2, + nSamplesPerSec: DWORD(sampleRate), + nAvgBytesPerSec: DWORD(sampleRate) * 4, + nBlockAlign: 4, + wBitsPerSample: 16, + cbSize: 0, + ) + CheckWinMMResult waveOutOpen( + addr result.handle, + WAVE_MAPPER, + addr format, + DWORD_PTR(0), + DWORD_PTR(0), + CALLBACK_NULL, + ) + + for i in 0 ..< buffers.len: + result.buffers.add WAVEHDR( + lpData: cast[ptr CHAR](addr buffers[i][][0]), + dwBufferLength: DWORD(buffers[i][].len * sizeof(Sample)), + dwLoops: 1, + ) + for i in 0 ..< result.buffers.len: + CheckWinMMResult waveOutPrepareHeader( + result.handle, addr result.buffers[i], UINT(sizeof(WAVEHDR)) + ) + CheckWinMMResult waveOutWrite( + result.handle, addr result.buffers[i], UINT(sizeof(WAVEHDR)) + ) + +proc WriteSoundData*(soundDevice: var NativeSoundDevice, buffer: int) = + while (soundDevice.buffers[buffer].dwFlags and WHDR_DONE) == 0: + sleep(1) + CheckWinMMResult waveOutWrite( + soundDevice.handle, addr soundDevice.buffers[buffer], UINT(sizeof(WAVEHDR)) + ) + +proc CloseSoundDevice*(soundDevice: var NativeSoundDevice) = + for i in 0 ..< soundDevice.buffers.len: + discard waveOutUnprepareHeader( + soundDevice.handle, addr soundDevice.buffers[i], UINT(sizeof(WAVEHDR)) + ) + waveOutClose(soundDevice.handle)
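The `WAVEFORMATEX` constants above follow directly from stereo 16-bit PCM; a small sanity check of that arithmetic:

```nim
const
  channels = 2
  bitsPerSample = 16
  blockAlign = channels * (bitsPerSample div 8)  # bytes per frame

assert blockAlign == 4
# hence nAvgBytesPerSec = nSamplesPerSec * blockAlign, i.e. DWORD(sampleRate) * 4 above
```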
--- a/semicongine/rendering.nim Sat Jan 11 12:22:21 2025 +0700
+++ b/semicongine/rendering.nim Sat Jan 11 14:04:39 2025 +0700
@@ -26,8 +26,18 @@
 when defined(linux):
   include ./rendering/platform/linux
 
-import ./rendering/vulkan_wrappers
-import ./rendering/swapchain
+import ../semicongine/rendering/memory
+import ../semicongine/rendering/renderer
+import ../semicongine/rendering/swapchain
+import ../semicongine/rendering/shaders
+import ../semicongine/rendering/renderpasses
+import ../semicongine/rendering/vulkan_wrappers
+export memory
+export renderer
+export swapchain
+export shaders
+export renderpasses
+export vulkan_wrappers
 
 proc debugCallback(
   messageSeverity: VkDebugUtilsMessageSeverityFlagBitsEXT,
@@ -177,6 +187,7 @@
     engine().vulkan.instance, engine().vulkan.debugMessenger, nil
   )
   vkDestroyInstance(engine().vulkan.instance, nil)
+  destroyWindow(engine().vulkan.window)
 
 proc showSystemCursor*(value: bool) =
   engine().vulkan.window.showSystemCursor(value)
--- a/semicongine/rendering/platform/linux.nim Sat Jan 11 12:22:21 2025 +0700
+++ b/semicongine/rendering/platform/linux.nim Sat Jan 11 14:04:39 2025 +0700
@@ -167,6 +167,9 @@
   checkXlibResult display.XFreePixmap(pixmap)
   return NativeWindow(display: display, window: window, emptyCursor: empty_cursor)
 
+proc destroyWindow*(window: NativeWindow) =
+  checkXlibResult XDestroyWindow(window.display, window.window)
+
 proc setTitle*(window: NativeWindow, title: string) =
   discard XSetStandardProperties(
     window.display, window.window, title, "window", 0, nil, 0, nil
--- a/semicongine/rendering/platform/windows.nim Sat Jan 11 12:22:21 2025 +0700
+++ b/semicongine/rendering/platform/windows.nim Sat Jan 11 14:04:39 2025 +0700
@@ -220,6 +220,9 @@
   discard result.hwnd.SetForegroundWindow()
   discard result.hwnd.SetFocus()
 
+proc destroyWindow*(window: NativeWindow) =
+  DestroyWindow(window.hwnd)
+
 proc setTitle*(window: NativeWindow, title: string) =
   window.hwnd.SetWindowText(T(title))
--- a/semicongine/text/font.nim Sat Jan 11 12:22:21 2025 +0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,253 +0,0 @@
-import std/os
-import std/strutils
-import std/strformat
-import std/sequtils
-import std/unicode
-import std/streams
-import std/logging
-import std/tables
-
-import ../core
-import ../resources
-import ../rendering/renderer
-import ../rendering/memory
-import ../contrib/algorithms/texture_packing
-
-{.emit: "#define STBTT_STATIC".}
-{.emit: "#define STB_TRUETYPE_IMPLEMENTATION".}
-{.
-  emit:
-    "#include \"" & currentSourcePath.parentDir().parentDir() &
-    "/thirdparty/stb/stb_truetype.h\""
-.}
-
-const ASCII_CHARSET = PrintableChars.toSeq.toRunes
-
-type stbtt_fontinfo {.importc, incompleteStruct.} = object
-
-proc stbtt_InitFont(
-  info: ptr stbtt_fontinfo, data: ptr char, offset: cint
-): cint {.importc, nodecl.}
-
-proc stbtt_ScaleForPixelHeight(
-  info: ptr stbtt_fontinfo, pixels: cfloat
-): cfloat {.importc, nodecl.}
-
-proc stbtt_GetCodepointBitmap(
-  info: ptr stbtt_fontinfo,
-  scale_x: cfloat,
-  scale_y: cfloat,
-  codepoint: cint,
-  width, height, xoff, yoff: ptr cint,
-): cstring {.importc, nodecl.}
-
-proc stbtt_GetCodepointBox(
-  info: ptr stbtt_fontinfo, codepoint: cint, x0, y0, x1, y1: ptr cint
-): cint {.importc, nodecl.}
-
-proc stbtt_GetCodepointHMetrics(
-  info: ptr stbtt_fontinfo, codepoint: cint, advance, leftBearing: ptr cint
-) {.importc, nodecl.}
-
-proc stbtt_GetCodepointKernAdvance(
-  info: ptr stbtt_fontinfo, ch1, ch2: cint
-): cint {.importc, nodecl.}
-
-proc stbtt_FindGlyphIndex(
-  info: ptr stbtt_fontinfo, codepoint: cint
-): cint {.importc, nodecl.}
-
-proc stbtt_GetFontVMetrics(
-  info: ptr stbtt_fontinfo, ascent, descent, lineGap: ptr cint
-) {.importc, nodecl.}
-
-proc readTrueType[N: static int](
-    stream: Stream, name: string, codePoints: seq[Rune], lineHeightPixels: float32
-): Font[N] =
-  assert codePoints.len <= N,
-    "asked for " & $codePoints.len & " glyphs but shader is only configured for " & $N
-
-  result = Font[N]()
-
-  var
-    indata = stream.readAll()
-    fi: stbtt_fontinfo
-  if stbtt_InitFont(addr fi, indata.ToCPointer, 0) == 0:
-    raise newException(Exception, "An error occured while loading font file")
-
-  let
-    glyph2bitmapScale =
-      float32(stbtt_ScaleForPixelHeight(addr fi, cfloat(lineHeightPixels)))
-    glyph2QuadScale = glyph2bitmapScale / lineHeightPixels
-
-  # ensure all codepoints are available in the font
-  for codePoint in codePoints:
-    if stbtt_FindGlyphIndex(addr fi, cint(codePoint)) == 0:
-      warn &"Loading font {name}: Codepoint '{codePoint}' ({cint(codePoint)}) has no glyph"
-
-  var
-    offsetY: Table[Rune, cint]
-    offsetX: Table[Rune, cint]
-    bitmaps: seq[Image[Gray]]
-
-  # render all glyphs to bitmaps and store quad geometry info
-  for codePoint in codePoints:
-    offsetX[codePoint] = 0
-    offsetY[codePoint] = 0
-    var width, height: cint
-    let data = stbtt_GetCodepointBitmap(
-      addr fi,
-      glyph2bitmapScale,
-      glyph2bitmapScale,
-      cint(codePoint),
-      addr width,
-      addr height,
-      addr (offsetX[codePoint]),
-      addr (offsetY[codePoint]),
-    )
-
-    if width > 0 and height > 0:
-      var bitmap = newSeq[Gray](width * height)
-      for i in 0 ..< width * height:
-        bitmap[i] = vec1u8(data[i].uint8)
-      bitmaps.add Image[Gray](width: width.uint32, height: height.uint32, data: bitmap)
-    else:
-      bitmaps.add Image[Gray](width: 1, height: 1, data: @[vec1u8(0)])
-
-    nativeFree(data)
-
-  # generate glyph atlas from bitmaps
-  let packed = pack(bitmaps)
-  result.descriptorSet.data.fontAtlas = packed.atlas
-
-  # generate quad-information for use in shader
-  for i in 0 ..< codePoints.len:
-    let codePoint = codePoints[i]
-    var advanceUnscaled, leftBearingUnscaled: cint
-    # is in glyph-space, needs to be scaled to pixel-space
-    stbtt_GetCodepointHMetrics(
-      addr fi, cint(codePoint), addr advanceUnscaled, addr leftBearingUnscaled
-    )
-    result.leftBearing[codePoint] = leftBearingUnscaled.float32 * glyph2QuadScale
-    result.advance[codePoint] = advanceUnscaled.float32 * glyph2QuadScale
-
-    let
-      atlasW = float32(result.descriptorSet.data.fontAtlas.width)
-      atlasH = float32(result.descriptorSet.data.fontAtlas.height)
-      uv = vec2(packed.coords[i].x, packed.coords[i].y)
-      bitmapW = float32(bitmaps[i].width)
-      bitmapH = float32(bitmaps[i].height)
-      # divide by lineHeightPixels to get from pixel-space to quad-geometry-space
-      left =
-        result.leftBearing[codePoint] + offsetX[codePoint].float32 / lineHeightPixels
-      right = left + bitmapW / lineHeightPixels
-      top = -offsetY[codePoint].float32 / lineHeightPixels
-      bottom = top - bitmapH / lineHeightPixels
-
-    template glyphquads(): untyped =
-      result.descriptorSet.data.glyphquads.data
-
-    glyphquads.pos[i] = vec4(left, bottom, right, top)
-    glyphquads.uv[i] = vec4(
-      (uv.x + 0.5) / atlasW, # left
-      (uv.y + bitmapH - 0.5) / atlasH, # bottom
-      (uv.x + bitmapW - 0.5) / atlasW, # right
-      (uv.y + 0.5) / atlasH, # top
-    )
-    if i == 0:
-      result.fallbackCharacter = codePoint
-    result.descriptorGlyphIndex[codePoint] = i.uint16
-    result.descriptorGlyphIndexRev[i.uint16] = codePoint # only used for debugging atm
-
-    # kerning
-    for codePointAfter in codePoints:
-      result.kerning[(codePoint, codePointAfter)] =
-        float32(
-          stbtt_GetCodepointKernAdvance(addr fi, cint(codePoint), cint(codePointAfter))
-        ) * glyph2QuadScale
-
-  # line spacing
-  var ascent, descent, lineGap: cint
-  stbtt_GetFontVMetrics(addr fi, addr ascent, addr descent, addr lineGap)
-  result.lineAdvance = float32(ascent - descent + lineGap) * glyph2QuadScale
-  result.lineHeight = float32(ascent - descent) * glyph2QuadScale # should be 1
-  result.ascent = float32(ascent) * glyph2QuadScale
-  result.descent = float32(descent) * glyph2QuadScale
-
-  var x0, y0, x1, y1: cint
-  discard
-    stbtt_GetCodepointBox(addr fi, cint(Rune('x')), addr x0, addr y0, addr x1, addr y1)
-  result.xHeight = float32(y1 - y0) * glyph2QuadScale
-
-proc loadFont*[N: static int](
-    path: string,
-    lineHeightPixels = 80'f32,
-    additional_codepoints: openArray[Rune] = [],
-    charset = ASCII_CHARSET,
-    package = DEFAULT_PACKAGE,
-): Font[N] =
-  readTrueType[N](
-    loadResource_intern(path, package = package),
-    path.splitFile().name,
-    charset & additional_codepoints.toSeq,
-    lineHeightPixels,
-  )
-
-proc upload*(font: Font, renderdata: var RenderData) =
-  assert font.descriptorSet.vk.allIt(not it.Valid), "Font was alread uploaded"
-  assignBuffers(renderdata, font.descriptorSet)
-  uploadImages(renderdata, font.descriptorSet)
-
-proc addToPipeline*(font: Font, renderdata: RenderData, pipeline: Pipeline) =
-  initDescriptorSet(renderdata, pipeline.layout(3), font.descriptorSet)
-
-proc bindTo*(font: Font, pipeline: Pipeline, commandbuffer: VkCommandBuffer) =
-  bindDescriptorSet(commandbuffer, font.descriptorSet, 3, pipeline)
-
-#[
-# needs to be adjusted to work correctly with new metrics code in text.nim
-func wordWrapped*(text: seq[Rune], font: FontObj, maxWidth: float32): seq[Rune] =
-  var remaining: seq[seq[Rune]] = @[@[]]
-  for c in text:
-    if c == SPACE:
-      remaining.add newSeq[Rune]()
-    else:
-      remaining[^1].add c
-  remaining.reverse()
-
-  var currentLine: seq[Rune]
-
-  while remaining.len > 0:
-    var currentWord = remaining.pop()
-    assert not (SPACE in currentWord)
-
-    if currentWord.len == 0:
-      currentLine.add SPACE
-    else:
-      assert currentWord[^1] != SPACE
-      # if this is the first word of the line and it is too long we need to
-      # split by character
-      if currentLine.len == 0 and (SPACE & currentWord).textWidth(font) > maxWidth:
-        var subWord = @[currentWord[0]]
-        for c in currentWord[1 .. ^1]:
-          if (subWord & c).textWidth(font) > maxWidth:
-            break
-          subWord.add c
-        result.add subWord & NEWLINE
-        remaining.add currentWord[subWord.len .. ^1]
-        # process rest of the word in next iteration
-      else:
-        if (currentLine & SPACE & currentWord).textWidth(font) <= maxWidth:
-          if currentLine.len == 0:
-            currentLine = currentWord
-          else:
-            currentLine = currentLine & SPACE & currentWord
-        else:
-          result.add currentLine & NEWLINE
-          remaining.add currentWord
-          currentLine = @[]
-  if currentLine.len > 0 and currentLine != @[SPACE]:
-    result.add currentLine
-
-  return result
-]#

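The module above is removed as part of the restructuring; its loadFont API appears to move to semicongine/font, which tests/test_text.nim below now imports. Going by the signature visible in the removed code, loading a font would still look roughly like the sketch below (the constants are taken from that test, the variable name is illustrative).

import ../semicongine/font

const MAX_CODEPOINTS = 200
const FONTNAME = "Overhaul.ttf"

# Font[N] reserves glyph slots for up to N codepoints in the shader;
# lineHeightPixels controls the rasterization size of the glyph atlas.
let uiFont = loadFont[MAX_CODEPOINTS](FONTNAME, lineHeightPixels = 80'f32)
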
--- a/tests/test_audio.nim Sat Jan 11 12:22:21 2025 +0700
+++ b/tests/test_audio.nim Sat Jan 11 14:04:39 2025 +0700
@@ -3,6 +3,8 @@
 import std/times

 import ../semicongine
+import ../semicongine/audio
+import ../semicongine/loaders

 proc test1() =
   addSound("test1", sineSoundData(1000, 2, 44100))

--- a/tests/test_gltf.nim Sat Jan 11 12:22:21 2025 +0700
+++ b/tests/test_gltf.nim Sat Jan 11 14:04:39 2025 +0700
@@ -5,6 +5,9 @@
 import std/options

 import ../semicongine
+import ../semicongine/rendering
+import ../semicongine/loaders
+import ../semicongine/input

 proc test_gltf(time: float32, renderPass: RenderPass) =
   var renderdata = initRenderData()

--- a/tests/test_rendering.nim Sat Jan 11 12:22:21 2025 +0700
+++ b/tests/test_rendering.nim Sat Jan 11 14:04:39 2025 +0700
@@ -7,6 +7,9 @@
 import std/random

 import ../semicongine
+import ../semicongine/rendering
+import ../semicongine/input
+import ../semicongine/loaders

 proc test_01_triangle(time: float32, renderPass: RenderPass) =
   var renderdata = initRenderData()

--- a/tests/test_storage.nim Sat Jan 11 12:22:21 2025 +0700
+++ b/tests/test_storage.nim Sat Jan 11 14:04:39 2025 +0700
@@ -1,6 +1,7 @@
 import std/strformat

 import ../semicongine
+import ../semicongine/storage

 proc testSimple(storage: StorageType) =
   const TEST_VALUE = 42

--- a/tests/test_text.nim Sat Jan 11 12:22:21 2025 +0700
+++ b/tests/test_text.nim Sat Jan 11 14:04:39 2025 +0700
@@ -10,6 +10,10 @@
 import std/unicode

 import ../semicongine
+import ../semicongine/rendering
+import ../semicongine/text
+import ../semicongine/input
+import ../semicongine/font

 const MAX_CODEPOINTS = 200
 const FONTNAME = "Overhaul.ttf"

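Taken together, the test updates show the import convention after the restructuring: each test keeps the top-level import ../semicongine and additionally imports the specific submodules whose symbols it uses (rendering, loaders, input, text, font, storage, audio). A new test written against this layout would plausibly start like the sketch below; the proc name and body are placeholders, not part of this changeset.

import ../semicongine            # engine setup / core symbols
import ../semicongine/rendering  # RenderPass, initRenderData, ...
import ../semicongine/loaders    # asset loading helpers
import ../semicongine/input      # windowing / input handling

proc test_something(time: float32, renderPass: RenderPass) =
  var renderdata = initRenderData()
  # ... exercise the feature under test ...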