Cortex: background gameplay recording with chunked .seg segments
Continuously records gameplay in the background when a game is connected. Keeps the last N minutes (configurable) as ~30s self-contained segment files on disk, with auto-trimming and thumbnail generation per segment. The C++ CortexRecorder writes the .seg binary format in real time and is integrated into StreamingEngine alongside the existing ClipRecorder. StreamingManager routes frames to a cortex-only engine when not streaming, and enables cortex on the streaming engine during live streams so the two coexist seamlessly. A new Cortex tab in the bottom navigation provides an enable toggle, duration presets, storage usage, and a session list with thumbnails.
This commit is contained in:
@@ -15,6 +15,9 @@ add_library(lck_streaming SHARED
|
||||
egl_context.cpp
|
||||
composition_pipeline.cpp
|
||||
streaming_engine.cpp
|
||||
clip_recorder.cpp
|
||||
cortex_recorder.cpp
|
||||
faststart.cpp
|
||||
)
|
||||
|
||||
target_include_directories(lck_streaming PRIVATE
|
||||
|
||||
313
app/src/main/cpp/clip_recorder.cpp
Normal file
313
app/src/main/cpp/clip_recorder.cpp
Normal file
@@ -0,0 +1,313 @@
|
||||
#include "clip_recorder.h"
|
||||
#include "faststart.h"
|
||||
|
||||
#include <media/NdkMediaMuxer.h>
|
||||
#include <media/NdkMediaFormat.h>
|
||||
#include <media/NdkMediaCodec.h>
|
||||
#include <android/log.h>
|
||||
|
||||
#include <algorithm>
|
||||
#include <fcntl.h>
|
||||
#include <unistd.h>
|
||||
#include <cstdio>
|
||||
|
||||
#define TAG "ClipRecorder"
|
||||
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO, TAG, __VA_ARGS__)
|
||||
#define LOGW(...) __android_log_print(ANDROID_LOG_WARN, TAG, __VA_ARGS__)
|
||||
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR, TAG, __VA_ARGS__)
|
||||
|
||||
void ClipRecorder::Configure(int width, int height,
|
||||
uint32_t audioSampleRate, uint32_t audioChannels_,
|
||||
int audioBitrate_) {
|
||||
std::lock_guard<std::mutex> lock(mutex);
|
||||
videoWidth = width;
|
||||
videoHeight = height;
|
||||
sampleRate = audioSampleRate;
|
||||
audioChannels = audioChannels_;
|
||||
audioBitrate = audioBitrate_;
|
||||
LOGI("Configured: %dx%d, audio %uHz %uch %dbps", width, height,
|
||||
audioSampleRate, audioChannels_, audioBitrate_);
|
||||
}
|
||||
|
||||
void ClipRecorder::SetVideoFormat(const uint8_t* codecConfig, uint32_t size) {
|
||||
std::lock_guard<std::mutex> lock(mutex);
|
||||
videoCodecConfig.assign(codecConfig, codecConfig + size);
|
||||
LOGI("Video codec config set: %u bytes", size);
|
||||
}
|
||||
|
||||
void ClipRecorder::SetAudioFormat(const uint8_t* codecConfig, uint32_t size) {
|
||||
std::lock_guard<std::mutex> lock(mutex);
|
||||
audioCodecConfig.assign(codecConfig, codecConfig + size);
|
||||
LOGI("Audio codec config set: %u bytes", size);
|
||||
}
|
||||
|
||||
void ClipRecorder::Start() {
|
||||
std::lock_guard<std::mutex> lock(mutex);
|
||||
gopBuffer.clear();
|
||||
currentGop = GopBuffer();
|
||||
audioBuffer.clear();
|
||||
active = true;
|
||||
LOGI("Started");
|
||||
}
|
||||
|
||||
void ClipRecorder::Stop() {
|
||||
std::lock_guard<std::mutex> lock(mutex);
|
||||
active = false;
|
||||
gopBuffer.clear();
|
||||
currentGop = GopBuffer();
|
||||
audioBuffer.clear();
|
||||
videoCodecConfig.clear();
|
||||
audioCodecConfig.clear();
|
||||
LOGI("Stopped");
|
||||
}
|
||||
|
||||
void ClipRecorder::FeedVideoPacket(const uint8_t* data, uint32_t size,
|
||||
int64_t timestampUs, bool isKeyframe) {
|
||||
std::lock_guard<std::mutex> lock(mutex);
|
||||
if (!active) return;
|
||||
|
||||
if (isKeyframe) {
|
||||
// Finalize current GOP and push to buffer
|
||||
if (!currentGop.samples.empty()) {
|
||||
gopBuffer.push_back(std::move(currentGop));
|
||||
}
|
||||
currentGop = GopBuffer();
|
||||
currentGop.startTimeUs = timestampUs;
|
||||
}
|
||||
|
||||
VideoSample sample;
|
||||
sample.data.assign(data, data + size);
|
||||
sample.timestampUs = timestampUs;
|
||||
sample.isKeyframe = isKeyframe;
|
||||
currentGop.samples.push_back(std::move(sample));
|
||||
|
||||
TrimBuffers();
|
||||
}
|
||||
|
||||
void ClipRecorder::FeedAudioPacket(const uint8_t* data, uint32_t size,
|
||||
int64_t timestampUs) {
|
||||
std::lock_guard<std::mutex> lock(mutex);
|
||||
if (!active) return;
|
||||
|
||||
AudioSample sample;
|
||||
sample.data.assign(data, data + size);
|
||||
sample.timestampUs = timestampUs;
|
||||
audioBuffer.push_back(std::move(sample));
|
||||
|
||||
TrimBuffers();
|
||||
}
|
||||
|
||||
// Drop the oldest complete GOPs (and their audio) so the rolling buffer
// stays within MAX_BUFFER_DURATION_US. Must be called with `mutex` held
// (it is invoked only from FeedVideoPacket/FeedAudioPacket, which lock).
void ClipRecorder::TrimBuffers() {
    // Trim oldest GOPs to stay under MAX_BUFFER_DURATION_US
    if (gopBuffer.size() < 2) return;

    // Newest timestamp seen so far: prefer the in-progress GOP, fall back
    // to the last completed one.
    int64_t newestTs = 0;
    if (!currentGop.samples.empty()) {
        newestTs = currentGop.samples.back().timestampUs;
    } else if (!gopBuffer.empty() && !gopBuffer.back().samples.empty()) {
        newestTs = gopBuffer.back().samples.back().timestampUs;
    }
    // NOTE(review): 0 doubles as the "no timestamp" sentinel — a legitimate
    // first-frame timestamp of exactly 0 would skip trimming this round.
    if (newestTs == 0) return;

    // Pop whole GOPs from the front while the window is too long; always
    // keep at least one complete GOP so FlushClip has something to mux.
    while (!gopBuffer.empty()) {
        int64_t oldestTs = gopBuffer.front().startTimeUs;
        if (newestTs - oldestTs > MAX_BUFFER_DURATION_US && gopBuffer.size() > 1) {
            gopBuffer.pop_front();
        } else {
            break;
        }
    }

    // Trim audio samples older than oldest remaining video
    if (!gopBuffer.empty()) {
        int64_t videoStart = gopBuffer.front().startTimeUs;
        while (!audioBuffer.empty() && audioBuffer.front().timestampUs < videoStart) {
            audioBuffer.pop_front();
        }
    }
}
|
||||
|
||||
// Mux the currently buffered GOPs plus matching audio into an MP4 clip at
// <outputDir>/preview_clip.mp4 and invoke the clip-ready callback.
//
// Buffers are copied under the lock; all muxing then runs without the lock
// so packet feeding is never blocked by disk I/O. Returns false if the
// recorder is inactive, no complete GOP is buffered, no video codec config
// has been set, or muxer setup fails.
bool ClipRecorder::FlushClip(const std::string& outputDir) {
    // Copy data under lock
    std::vector<GopBuffer> gopsCopy;
    std::vector<AudioSample> audioCopy;
    std::vector<uint8_t> videoCsdCopy;
    std::vector<uint8_t> audioCsdCopy;
    int w, h;
    uint32_t sr, ch;
    int abr;

    {
        std::lock_guard<std::mutex> lock(mutex);
        if (!active) return false;
        if (gopBuffer.empty()) {
            LOGW("FlushClip: no complete GOPs");
            return false;
        }
        if (videoCodecConfig.empty()) {
            LOGW("FlushClip: no video codec config");
            return false;
        }

        gopsCopy.assign(gopBuffer.begin(), gopBuffer.end());
        audioCopy.assign(audioBuffer.begin(), audioBuffer.end());
        videoCsdCopy = videoCodecConfig;
        audioCsdCopy = audioCodecConfig;
        w = videoWidth;
        h = videoHeight;
        sr = sampleRate;
        ch = audioChannels;
        abr = audioBitrate;
    }

    // Mux to temp file
    std::string tempPath = outputDir + "/preview_clip_tmp.mp4";
    std::string finalPath = outputDir + "/preview_clip.mp4";

    int fd = open(tempPath.c_str(), O_WRONLY | O_CREAT | O_TRUNC, 0644);
    if (fd < 0) {
        LOGE("FlushClip: failed to open temp file: %s", tempPath.c_str());
        return false;
    }

    AMediaMuxer* muxer = AMediaMuxer_new(fd, AMEDIAMUXER_OUTPUT_FORMAT_MPEG_4);
    if (!muxer) {
        LOGE("FlushClip: failed to create muxer");
        close(fd);
        return false;
    }

    // Video track — split Annex-B SPS+PPS into separate csd-0 (SPS) and csd-1 (PPS)
    // MediaCodec CODEC_CONFIG output: [00 00 00 01 SPS] [00 00 00 01 PPS]
    // AMediaMuxer expects csd-0 = SPS NAL (with start code), csd-1 = PPS NAL (with start code)
    const uint8_t* csd = videoCsdCopy.data();
    size_t csdSize = videoCsdCopy.size();
    size_t spsLen = csdSize;   // if no second start code is found, treat the whole buffer as csd-0
    size_t ppsOffset = 0;
    size_t ppsLen = 0;

    // Find the second start code (00 00 00 01) to split SPS from PPS
    for (size_t i = 4; i + 3 < csdSize; i++) {
        if (csd[i] == 0x00 && csd[i+1] == 0x00 && csd[i+2] == 0x00 && csd[i+3] == 0x01) {
            spsLen = i;
            ppsOffset = i;
            ppsLen = csdSize - i;
            break;
        }
    }

    LOGI("FlushClip: csd total=%zu, SPS=%zu bytes, PPS=%zu bytes", csdSize, spsLen, ppsLen);

    AMediaFormat* videoFormat = AMediaFormat_new();
    AMediaFormat_setString(videoFormat, AMEDIAFORMAT_KEY_MIME, "video/avc");
    AMediaFormat_setInt32(videoFormat, AMEDIAFORMAT_KEY_WIDTH, w);
    AMediaFormat_setInt32(videoFormat, AMEDIAFORMAT_KEY_HEIGHT, h);
    AMediaFormat_setBuffer(videoFormat, "csd-0", csd, spsLen);
    if (ppsLen > 0) {
        AMediaFormat_setBuffer(videoFormat, "csd-1", csd + ppsOffset, ppsLen);
    }

    ssize_t videoTrack = AMediaMuxer_addTrack(muxer, videoFormat);
    AMediaFormat_delete(videoFormat);

    if (videoTrack < 0) {
        LOGE("FlushClip: failed to add video track");
        AMediaMuxer_delete(muxer);
        close(fd);
        return false;
    }

    // Audio track (optional — may not have audio config)
    ssize_t audioTrack = -1;
    if (!audioCsdCopy.empty()) {
        AMediaFormat* audioFormat = AMediaFormat_new();
        AMediaFormat_setString(audioFormat, AMEDIAFORMAT_KEY_MIME, "audio/mp4a-latm");
        AMediaFormat_setInt32(audioFormat, AMEDIAFORMAT_KEY_SAMPLE_RATE, sr);
        AMediaFormat_setInt32(audioFormat, AMEDIAFORMAT_KEY_CHANNEL_COUNT, ch);
        AMediaFormat_setInt32(audioFormat, AMEDIAFORMAT_KEY_BIT_RATE, abr);
        AMediaFormat_setBuffer(audioFormat, "csd-0", audioCsdCopy.data(), audioCsdCopy.size());

        audioTrack = AMediaMuxer_addTrack(muxer, audioFormat);
        AMediaFormat_delete(audioFormat);

        if (audioTrack < 0) {
            LOGW("FlushClip: failed to add audio track, continuing video-only");
            audioTrack = -1;
        }
    }

    media_status_t status = AMediaMuxer_start(muxer);
    if (status != AMEDIA_OK) {
        LOGE("FlushClip: failed to start muxer: %d", status);
        AMediaMuxer_delete(muxer);
        close(fd);
        return false;
    }

    // Compute base timestamp for zero-based output
    int64_t baseTs = gopsCopy.front().startTimeUs;

    // Write video samples
    int videoSamplesWritten = 0;
    for (const auto& gop : gopsCopy) {
        for (const auto& sample : gop.samples) {
            AMediaCodecBufferInfo info;
            info.offset = 0;
            info.size = static_cast<int32_t>(sample.data.size());
            info.presentationTimeUs = sample.timestampUs - baseTs;
            info.flags = sample.isKeyframe ? AMEDIACODEC_BUFFER_FLAG_KEY_FRAME : 0;

            // NOTE(review): writeSampleData's status is not checked; a failed
            // write is only detectable via the final file contents.
            AMediaMuxer_writeSampleData(muxer, videoTrack,
                                        sample.data.data(), &info);
            videoSamplesWritten++;
        }
    }

    // Write audio samples
    int audioSamplesWritten = 0;
    if (audioTrack >= 0) {
        for (const auto& sample : audioCopy) {
            // Only include audio within the video time range
            int64_t relTs = sample.timestampUs - baseTs;
            if (relTs < 0) continue;

            AMediaCodecBufferInfo info;
            info.offset = 0;
            info.size = static_cast<int32_t>(sample.data.size());
            info.presentationTimeUs = relTs;
            info.flags = 0;

            AMediaMuxer_writeSampleData(muxer, audioTrack,
                                        sample.data.data(), &info);
            audioSamplesWritten++;
        }
    }

    AMediaMuxer_stop(muxer);
    AMediaMuxer_delete(muxer);
    close(fd);

    LOGI("FlushClip: muxed %d video + %d audio samples to temp file",
         videoSamplesWritten, audioSamplesWritten);

    // Apply faststart (move moov atom to front)
    if (!MoovFastStart(tempPath, finalPath)) {
        LOGW("FlushClip: faststart failed, using non-optimized file");
        rename(tempPath.c_str(), finalPath.c_str());
    } else {
        unlink(tempPath.c_str());
    }

    LOGI("FlushClip: clip ready at %s", finalPath.c_str());

    // Callback is invoked without holding `mutex` (we are outside the
    // locked scope above), so it may safely call back into this object.
    if (clipReadyCallback) {
        clipReadyCallback(finalPath);
    }

    return true;
}
|
||||
|
||||
// Install the callback invoked after FlushClip() writes a clip to disk.
void ClipRecorder::SetClipReadyCallback(ClipReadyCallback cb) {
    std::lock_guard<std::mutex> guard(mutex);
    clipReadyCallback = std::move(cb);
}
|
||||
75
app/src/main/cpp/clip_recorder.h
Normal file
75
app/src/main/cpp/clip_recorder.h
Normal file
@@ -0,0 +1,75 @@
|
||||
#pragma once
|
||||
|
||||
#include <cstdint>
|
||||
#include <deque>
|
||||
#include <functional>
|
||||
#include <mutex>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
// One encoded video access unit held in memory.
struct VideoSample {
    std::vector<uint8_t> data;   // encoded frame bytes
    int64_t timestampUs;         // presentation timestamp, microseconds
    bool isKeyframe;             // true if this sample opens a GOP
};
|
||||
|
||||
// One encoded audio packet held in memory.
struct AudioSample {
    std::vector<uint8_t> data;   // encoded packet bytes
    int64_t timestampUs;         // presentation timestamp, microseconds
};
|
||||
|
||||
// A group of pictures: typically a keyframe plus the delta frames that
// follow it, trimmed and flushed as an indivisible unit.
struct GopBuffer {
    std::vector<VideoSample> samples;  // frames in feed order
    int64_t startTimeUs = 0;           // timestamp of the opening keyframe
};
|
||||
|
||||
// Rolling in-memory recorder for short "preview clips".
//
// Encoded video/audio packets are fed in continuously; the recorder keeps
// roughly the last MAX_BUFFER_DURATION_US worth of complete GOPs (plus
// matching audio) and, on request, muxes them into an MP4 via FlushClip().
// Public methods synchronize on an internal mutex.
class ClipRecorder {
public:
    // Invoked with the final clip path after FlushClip() succeeds.
    using ClipReadyCallback = std::function<void(const std::string& path)>;

    ClipRecorder() = default;
    ~ClipRecorder() = default;

    // Set output dimensions and audio parameters used when muxing.
    void Configure(int width, int height,
                   uint32_t audioSampleRate, uint32_t audioChannels,
                   int audioBitrate);

    // Codec-specific data (Annex-B SPS+PPS for video, AAC config for audio).
    void SetVideoFormat(const uint8_t* codecConfig, uint32_t size);
    void SetAudioFormat(const uint8_t* codecConfig, uint32_t size);

    // Begin/stop accepting packets; both reset the rolling buffers.
    void Start();
    void Stop();

    // Feed encoded packets (called continuously while active).
    void FeedVideoPacket(const uint8_t* data, uint32_t size,
                         int64_t timestampUs, bool isKeyframe);
    void FeedAudioPacket(const uint8_t* data, uint32_t size,
                         int64_t timestampUs);

    // Mux the buffered window into <outputDir>/preview_clip.mp4.
    // Returns false if inactive or nothing complete is buffered.
    bool FlushClip(const std::string& outputDir);

    void SetClipReadyCallback(ClipReadyCallback cb);

private:
    // Drops oldest GOPs/audio beyond MAX_BUFFER_DURATION_US; caller holds mutex.
    void TrimBuffers();

    std::mutex mutex;            // guards all state below
    bool active = false;         // true between Start() and Stop()

    int videoWidth = 0;
    int videoHeight = 0;
    uint32_t sampleRate = 48000;
    uint32_t audioChannels = 2;
    int audioBitrate = 128000;

    std::vector<uint8_t> videoCodecConfig;   // SPS+PPS (Annex-B)
    std::vector<uint8_t> audioCodecConfig;   // AAC codec config

    std::deque<GopBuffer> gopBuffer;         // completed GOPs, oldest first
    GopBuffer currentGop;                    // GOP being filled

    std::deque<AudioSample> audioBuffer;     // audio packets, oldest first

    ClipReadyCallback clipReadyCallback;

    static constexpr int64_t MAX_BUFFER_DURATION_US = 12000000; // 12s
};
|
||||
328
app/src/main/cpp/cortex_recorder.cpp
Normal file
328
app/src/main/cpp/cortex_recorder.cpp
Normal file
@@ -0,0 +1,328 @@
|
||||
#include "cortex_recorder.h"
|
||||
|
||||
#include <android/log.h>
|
||||
#include <dirent.h>
|
||||
#include <sys/stat.h>
|
||||
#include <unistd.h>
|
||||
#include <fcntl.h>
|
||||
#include <cstring>
|
||||
#include <algorithm>
|
||||
#include <cstdio>
|
||||
|
||||
#define TAG "CortexRecorder"
|
||||
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO, TAG, __VA_ARGS__)
|
||||
#define LOGW(...) __android_log_print(ANDROID_LOG_WARN, TAG, __VA_ARGS__)
|
||||
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR, TAG, __VA_ARGS__)
|
||||
|
||||
// Segment binary format magic
|
||||
static const char SEGMENT_MAGIC[4] = {'C', 'S', 'E', 'G'};
|
||||
static const uint32_t SEGMENT_VERSION = 1;
|
||||
static const uint32_t END_MARKER = 0xFFFFFFFF;
|
||||
|
||||
// Helper: write raw bytes to fd
|
||||
// Write exactly `size` bytes to `fd`, looping over short writes.
//
// POSIX write() may legitimately transfer fewer bytes than requested
// (signal interruption, pipe/disk limits); the previous version treated
// any short write as a hard failure and dropped the remaining bytes.
// Returns true only if the entire buffer was written.
static bool writeBytes(int fd, const void* data, size_t size) {
    const uint8_t* p = static_cast<const uint8_t*>(data);
    size_t remaining = size;
    while (remaining > 0) {
        ssize_t n = write(fd, p, remaining);
        if (n <= 0) return false;  // error or no progress — give up
        p += n;
        remaining -= static_cast<size_t>(n);
    }
    return true;
}
|
||||
|
||||
template<typename T>
|
||||
static bool writeVal(int fd, T val) {
|
||||
return writeBytes(fd, &val, sizeof(T));
|
||||
}
|
||||
|
||||
void CortexRecorder::Configure(int w, int h, uint32_t sr, uint32_t ch, int abr) {
|
||||
std::lock_guard<std::mutex> lock(mutex);
|
||||
videoWidth = w;
|
||||
videoHeight = h;
|
||||
sampleRate = sr;
|
||||
audioChannels = ch;
|
||||
audioBitrate = abr;
|
||||
LOGI("Configured: %dx%d, audio %uHz %uch %dbps", w, h, sr, ch, abr);
|
||||
}
|
||||
|
||||
void CortexRecorder::SetVideoFormat(const uint8_t* sps_pps, uint32_t size) {
|
||||
std::lock_guard<std::mutex> lock(mutex);
|
||||
videoCodecConfig.assign(sps_pps, sps_pps + size);
|
||||
LOGI("Video format set: %u bytes", size);
|
||||
}
|
||||
|
||||
void CortexRecorder::SetAudioFormat(const uint8_t* aac_config, uint32_t size) {
|
||||
std::lock_guard<std::mutex> lock(mutex);
|
||||
audioCodecConfig.assign(aac_config, aac_config + size);
|
||||
LOGI("Audio format set: %u bytes", size);
|
||||
}
|
||||
|
||||
void CortexRecorder::StartSession(const std::string& dir) {
|
||||
std::lock_guard<std::mutex> lock(mutex);
|
||||
|
||||
// Create directory
|
||||
mkdir(dir.c_str(), 0755);
|
||||
|
||||
sessionDir = dir;
|
||||
segmentIndex = 0;
|
||||
segmentStartUs = 0;
|
||||
currentSamples.clear();
|
||||
firstKeyframeData.clear();
|
||||
active = true;
|
||||
|
||||
LOGI("Session started: %s", dir.c_str());
|
||||
}
|
||||
|
||||
void CortexRecorder::StopSession() {
|
||||
std::lock_guard<std::mutex> lock(mutex);
|
||||
if (!active) return;
|
||||
|
||||
// Finalize any partial segment
|
||||
if (!currentSamples.empty()) {
|
||||
FinalizeCurrentSegment();
|
||||
}
|
||||
|
||||
active = false;
|
||||
currentSamples.clear();
|
||||
firstKeyframeData.clear();
|
||||
videoCodecConfig.clear();
|
||||
audioCodecConfig.clear();
|
||||
LOGI("Session stopped");
|
||||
}
|
||||
|
||||
// Buffer one encoded video frame for the current segment.
//
// Segments are cut only at keyframe boundaries: when a keyframe arrives and
// the in-progress segment has already reached SEGMENT_DURATION_US, the
// segment is finalized to disk first, and this keyframe opens the next one
// (making every .seg file decodable from its first sample).
void CortexRecorder::FeedVideoPacket(const uint8_t* data, uint32_t size,
                                     int64_t timestampUs, bool isKeyframe) {
    std::lock_guard<std::mutex> lock(mutex);
    if (!active) return;

    // On keyframe boundary, check if we should finalize the current segment
    if (isKeyframe && !currentSamples.empty()) {
        int64_t segDuration = timestampUs - segmentStartUs;
        if (segDuration >= SEGMENT_DURATION_US) {
            FinalizeCurrentSegment();
        }
    }

    // Start new segment if empty
    if (currentSamples.empty()) {
        segmentStartUs = timestampUs;
    }

    // Store first keyframe data for thumbnail callback
    // (FinalizeCurrentSegment clears this, so after a cut the keyframe that
    // triggered the cut becomes the new segment's thumbnail source.)
    if (isKeyframe && firstKeyframeData.empty()) {
        firstKeyframeData.assign(data, data + size);
    }

    CortexSampleEntry entry;
    entry.track = 0; // video
    entry.flags = isKeyframe ? 1 : 0;
    entry.size = size;
    entry.timestampUs = timestampUs;
    entry.data.assign(data, data + size);
    currentSamples.push_back(std::move(entry));
}
|
||||
|
||||
void CortexRecorder::FeedAudioPacket(const uint8_t* data, uint32_t size,
|
||||
int64_t timestampUs) {
|
||||
std::lock_guard<std::mutex> lock(mutex);
|
||||
if (!active) return;
|
||||
|
||||
CortexSampleEntry entry;
|
||||
entry.track = 1; // audio
|
||||
entry.flags = 0;
|
||||
entry.size = size;
|
||||
entry.timestampUs = timestampUs;
|
||||
entry.data.assign(data, data + size);
|
||||
currentSamples.push_back(std::move(entry));
|
||||
}
|
||||
|
||||
void CortexRecorder::SetMaxDurationMinutes(int minutes) {
|
||||
std::lock_guard<std::mutex> lock(mutex);
|
||||
maxDurationMinutes = minutes;
|
||||
LOGI("Max duration set to %d minutes", minutes);
|
||||
}
|
||||
|
||||
// Install the callback notified each time a segment file is finalized.
void CortexRecorder::SetSegmentCallback(SegmentCallback cb) {
    std::lock_guard<std::mutex> guard(mutex);
    segmentCallback = std::move(cb);
}
|
||||
|
||||
// Returns whether a recording session is currently active.
// NOTE(review): `active` is read here without taking `mutex` (the method is
// const and the mutex is not mutable), so cross-thread calls are formally a
// data race — consider making `active` a std::atomic<bool>. TODO confirm
// whether callers only invoke this from the feeding thread.
bool CortexRecorder::IsActive() const {
    return active;
}
|
||||
|
||||
std::string CortexRecorder::MakeSegmentPath(int index) const {
|
||||
char buf[64];
|
||||
snprintf(buf, sizeof(buf), "seg_%06d.seg", index);
|
||||
return sessionDir + "/" + buf;
|
||||
}
|
||||
|
||||
// Write the buffered samples of the current segment to disk as a .seg file,
// notify the segment callback (for thumbnail generation), advance the
// segment index, and trim old segments past the retention window.
//
// .seg layout: magic "CSEG", version, width, height,
//   sps_pps_size + bytes, aac_config_size + bytes,
//   start_us (i64), duration_us (i64), sample_count (u32),
//   then per sample: track (u8), flags (u8), size (u32), ts_us (i64), data,
//   terminated by END_MARKER.
//
// Must be called with `mutex` held. NOTE(review): individual writeBytes/
// writeVal results are not checked, so a disk-full condition produces a
// truncated segment silently (the parser's END_MARKER/magic checks are the
// backstop) — TODO confirm this is acceptable.
void CortexRecorder::FinalizeCurrentSegment() {
    // Must be called under lock
    if (currentSamples.empty()) return;

    std::string segPath = MakeSegmentPath(segmentIndex);

    int fd = open(segPath.c_str(), O_WRONLY | O_CREAT | O_TRUNC, 0644);
    if (fd < 0) {
        LOGE("Failed to open segment file: %s", segPath.c_str());
        currentSamples.clear();
        firstKeyframeData.clear();
        return;
    }

    // Compute duration
    int64_t lastTs = currentSamples.back().timestampUs;
    int64_t segDuration = lastTs - segmentStartUs;
    uint32_t sampleCount = static_cast<uint32_t>(currentSamples.size());

    // Write header
    writeBytes(fd, SEGMENT_MAGIC, 4);
    writeVal<uint32_t>(fd, SEGMENT_VERSION);
    writeVal<uint32_t>(fd, static_cast<uint32_t>(videoWidth));
    writeVal<uint32_t>(fd, static_cast<uint32_t>(videoHeight));

    // SPS/PPS
    uint32_t spsPpsSize = static_cast<uint32_t>(videoCodecConfig.size());
    writeVal<uint32_t>(fd, spsPpsSize);
    if (spsPpsSize > 0) writeBytes(fd, videoCodecConfig.data(), spsPpsSize);

    // AAC config
    uint32_t aacConfigSize = static_cast<uint32_t>(audioCodecConfig.size());
    writeVal<uint32_t>(fd, aacConfigSize);
    if (aacConfigSize > 0) writeBytes(fd, audioCodecConfig.data(), aacConfigSize);

    // Timing
    writeVal<int64_t>(fd, segmentStartUs);
    writeVal<int64_t>(fd, segDuration);
    writeVal<uint32_t>(fd, sampleCount);

    // Write samples
    for (const auto& sample : currentSamples) {
        writeVal<uint8_t>(fd, sample.track);
        writeVal<uint8_t>(fd, sample.flags);
        writeVal<uint32_t>(fd, sample.size);
        writeVal<int64_t>(fd, sample.timestampUs);
        writeBytes(fd, sample.data.data(), sample.size);
    }

    // End marker
    writeVal<uint32_t>(fd, END_MARKER);

    close(fd);

    LOGI("Segment finalized: %s (%u samples, %.1fs)",
         segPath.c_str(), sampleCount, segDuration / 1000000.0);

    // Callback with segment path + first keyframe data for thumbnail
    if (segmentCallback && !firstKeyframeData.empty()) {
        // Copy callback and data before calling (callback may be slow)
        auto cb = segmentCallback;
        auto kfData = firstKeyframeData;
        // Call outside lock would be better, but we're already under lock.
        // The callback should be fast (just posts to another thread).
        cb(segPath, kfData.data(), static_cast<uint32_t>(kfData.size()));
    }

    segmentIndex++;
    currentSamples.clear();
    firstKeyframeData.clear();

    // Trim old segments
    TrimOldSegments();
}
|
||||
|
||||
// Enforce the retention window: scan all .seg files in the session
// directory, read each segment's duration from its header, and delete the
// oldest files (plus their thumb_NNNNNN.jpg thumbnails) until the total
// duration fits within maxDurationMinutes. Must be called with `mutex` held.
void CortexRecorder::TrimOldSegments() {
    // Must be called under lock
    // Scan all .seg files in session dir, read their duration from header,
    // and delete oldest when total exceeds maxDurationMinutes

    struct SegInfo {
        std::string path;     // full path to the .seg file
        int64_t durationUs;   // duration read from the segment header
        int index;            // numeric index parsed from the filename
    };

    DIR* dir = opendir(sessionDir.c_str());
    if (!dir) return;

    std::vector<SegInfo> segments;
    struct dirent* entry;
    while ((entry = readdir(dir)) != nullptr) {
        std::string name = entry->d_name;
        if (name.size() < 4 || name.substr(name.size() - 4) != ".seg") continue;

        std::string path = sessionDir + "/" + name;

        // Parse index from filename (seg_NNNNNN.seg)
        int idx = 0;
        if (name.size() >= 14 && name.substr(0, 4) == "seg_") {
            idx = std::atoi(name.substr(4, 6).c_str());
        }

        // Read duration from header
        int fd = open(path.c_str(), O_RDONLY);
        if (fd < 0) continue;

        // Skip: magic(4) + version(4) + width(4) + height(4) = 16
        // Then sps_pps_size(4) + N bytes, aac_config_size(4) + N bytes
        // Then seg_start_us(8), seg_duration_us(8)
        char magic[4];
        if (read(fd, magic, 4) != 4 || memcmp(magic, SEGMENT_MAGIC, 4) != 0) {
            close(fd);
            continue;
        }

        // NOTE(review): the read() return values below are unchecked — a
        // truncated header would yield garbage durationUs for this entry.
        uint32_t ver, w, h, spsSz, aacSz;
        read(fd, &ver, 4);
        read(fd, &w, 4);
        read(fd, &h, 4);
        read(fd, &spsSz, 4);
        lseek(fd, spsSz, SEEK_CUR);
        read(fd, &aacSz, 4);
        lseek(fd, aacSz, SEEK_CUR);

        int64_t startUs, durationUs;
        read(fd, &startUs, 8);
        read(fd, &durationUs, 8);
        close(fd);

        segments.push_back({path, durationUs, idx});
    }
    closedir(dir);

    if (segments.empty()) return;

    // Sort by index (ascending = oldest first)
    std::sort(segments.begin(), segments.end(),
              [](const SegInfo& a, const SegInfo& b) { return a.index < b.index; });

    // Sum total duration from newest backward
    int64_t maxUs = static_cast<int64_t>(maxDurationMinutes) * 60 * 1000000LL;
    int64_t totalUs = 0;

    // Walk from newest to oldest, mark keep boundary
    int keepFrom = static_cast<int>(segments.size()); // index into segments where we start keeping
    for (int i = static_cast<int>(segments.size()) - 1; i >= 0; i--) {
        totalUs += segments[i].durationUs;
        if (totalUs > maxUs) {
            // This segment pushes us over — delete from here backward
            break;
        }
        keepFrom = i;
    }

    // Delete segments before keepFrom
    int deleted = 0;
    for (int i = 0; i < keepFrom; i++) {
        unlink(segments[i].path.c_str());

        // Also delete associated thumbnail
        std::string thumbPath = sessionDir + "/";
        char thumbName[64];
        snprintf(thumbName, sizeof(thumbName), "thumb_%06d.jpg", segments[i].index);
        thumbPath += thumbName;
        unlink(thumbPath.c_str());

        deleted++;
    }

    if (deleted > 0) {
        LOGI("Trimmed %d old segments (total duration %.0fs, max %dm)",
             deleted, totalUs / 1000000.0, maxDurationMinutes);
    }
}
|
||||
69
app/src/main/cpp/cortex_recorder.h
Normal file
69
app/src/main/cpp/cortex_recorder.h
Normal file
@@ -0,0 +1,69 @@
|
||||
#pragma once
|
||||
|
||||
#include <cstdint>
|
||||
#include <functional>
|
||||
#include <mutex>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
// One encoded sample (video or audio) buffered for the in-progress segment.
struct CortexSampleEntry {
    uint8_t track;              // 0=video, 1=audio
    uint8_t flags;              // bit0=keyframe
    uint32_t size;              // payload size in bytes (== data.size())
    int64_t timestampUs;        // presentation timestamp, microseconds
    std::vector<uint8_t> data;  // encoded payload
};
|
||||
|
||||
// Background gameplay recorder that persists encoded packets as chunked,
// self-contained ~30s .seg files on disk, keeping only the most recent
// maxDurationMinutes worth (older segments and their thumbnails are
// deleted automatically). Public methods synchronize on an internal mutex.
class CortexRecorder {
public:
    // Notified when a segment file is finalized; keyframeData is the first
    // keyframe of that segment, for thumbnail generation.
    using SegmentCallback = std::function<void(const std::string& segPath,
                                               const uint8_t* keyframeData,
                                               uint32_t keyframeSize)>;

    CortexRecorder() = default;
    ~CortexRecorder() = default;

    // Stream parameters written into every segment header.
    void Configure(int w, int h, uint32_t sampleRate, uint32_t channels, int audioBitrate);
    void SetVideoFormat(const uint8_t* sps_pps, uint32_t size);
    void SetAudioFormat(const uint8_t* aac_config, uint32_t size);

    // Begin writing segments under sessionDir / flush and stop.
    void StartSession(const std::string& sessionDir);
    void StopSession();

    // Feed encoded packets; segments are cut on keyframe boundaries.
    void FeedVideoPacket(const uint8_t* data, uint32_t size, int64_t timestampUs, bool isKeyframe);
    void FeedAudioPacket(const uint8_t* data, uint32_t size, int64_t timestampUs);

    // Retention window for on-disk segments.
    void SetMaxDurationMinutes(int minutes);
    void SetSegmentCallback(SegmentCallback cb);

    bool IsActive() const;

private:
    // Writes the buffered segment to disk, fires the callback, trims.
    void FinalizeCurrentSegment();
    // Deletes oldest segments (and thumbnails) beyond the retention window.
    void TrimOldSegments();
    // Builds "<sessionDir>/seg_NNNNNN.seg".
    std::string MakeSegmentPath(int index) const;

    std::mutex mutex;        // guards all state below
    bool active = false;     // true between StartSession() and StopSession()

    int videoWidth = 0;
    int videoHeight = 0;
    uint32_t sampleRate = 48000;
    uint32_t audioChannels = 2;
    int audioBitrate = 128000;

    std::vector<uint8_t> videoCodecConfig; // SPS+PPS
    std::vector<uint8_t> audioCodecConfig; // AAC config

    std::string sessionDir;                // root for segment/thumbnail files
    int segmentIndex = 0;                  // next segment file number
    int64_t segmentStartUs = 0;            // first timestamp of current segment
    std::vector<CortexSampleEntry> currentSamples;
    std::vector<uint8_t> firstKeyframeData; // first keyframe NAL in current segment

    int maxDurationMinutes = 10;           // retention window for TrimOldSegments

    static constexpr int64_t SEGMENT_DURATION_US = 30000000LL; // 30 seconds

    SegmentCallback segmentCallback;
};
|
||||
235
app/src/main/cpp/faststart.cpp
Normal file
235
app/src/main/cpp/faststart.cpp
Normal file
@@ -0,0 +1,235 @@
|
||||
#include "faststart.h"
|
||||
|
||||
#include <android/log.h>
|
||||
|
||||
#include <cstdint>
|
||||
#include <cstdio>
|
||||
#include <cstring>
|
||||
#include <vector>
|
||||
|
||||
#define TAG "MoovFastStart"
|
||||
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO, TAG, __VA_ARGS__)
|
||||
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR, TAG, __VA_ARGS__)
|
||||
|
||||
// Read a 4-byte big-endian unsigned integer from `f`; returns 0 on a
// short read (indistinguishable from a stored zero by design).
static uint32_t ReadBE32(FILE* f) {
    uint8_t b[4];
    if (fread(b, 1, 4, f) != 4) {
        return 0;
    }
    uint32_t v = uint32_t(b[0]);
    v = (v << 8) | uint32_t(b[1]);
    v = (v << 8) | uint32_t(b[2]);
    v = (v << 8) | uint32_t(b[3]);
    return v;
}
|
||||
|
||||
// Store `v` as 4 big-endian bytes at `p`.
static void WriteBE32(uint8_t* p, uint32_t v) {
    for (int i = 3; i >= 0; --i) {
        p[i] = uint8_t(v & 0xFF);
        v >>= 8;
    }
}
|
||||
|
||||
// Store `v` as 8 big-endian bytes at `p`.
static void WriteBE64(uint8_t* p, uint64_t v) {
    for (int i = 7; i >= 0; --i) {
        p[i] = uint8_t(v & 0xFF);
        v >>= 8;
    }
}
|
||||
|
||||
// Assemble 8 big-endian bytes at `p` into a 64-bit value.
static uint64_t ReadBE64(const uint8_t* p) {
    uint64_t v = 0;
    for (int i = 0; i < 8; ++i) {
        v = (v << 8) | uint64_t(p[i]);
    }
    return v;
}
|
||||
|
||||
// Adjust chunk offsets in moov by a delta (moov size)
|
||||
// Recursively walk an MP4 box tree rooted at `data` (length `size`) and add
// `delta` to every chunk offset found in 'stco' (32-bit) and 'co64' (64-bit)
// boxes. Needed after moving 'moov' ahead of 'mdat': media data shifts
// forward by the size of the relocated moov, so all absolute file offsets
// inside the sample tables must shift with it.
static void AdjustChunkOffsets(uint8_t* data, uint32_t size, int64_t delta) {
    if (size < 8) return;

    // Box header: 32-bit big-endian size followed by the 4-char type.
    uint32_t atomSize = (uint32_t(data[0]) << 24) | (uint32_t(data[1]) << 16) |
                        (uint32_t(data[2]) << 8) | uint32_t(data[3]);
    char fourcc[5] = {(char)data[4], (char)data[5], (char)data[6], (char)data[7], 0};

    // size==0 means "to end of enclosing span"; clamp bogus sizes too.
    if (atomSize == 0 || atomSize > size) atomSize = size;

    if (strcmp(fourcc, "stco") == 0 && atomSize >= 16) {
        // 32-bit chunk offset box: 4 size + 4 fourcc + 1 version + 3 flags + 4 count + N*4 offsets
        uint32_t count = (uint32_t(data[12]) << 24) | (uint32_t(data[13]) << 16) |
                         (uint32_t(data[14]) << 8) | uint32_t(data[15]);
        for (uint32_t i = 0; i < count && (16 + (i + 1) * 4) <= atomSize; i++) {
            uint8_t* p = data + 16 + i * 4;
            uint32_t offset = (uint32_t(p[0]) << 24) | (uint32_t(p[1]) << 16) |
                              (uint32_t(p[2]) << 8) | uint32_t(p[3]);
            WriteBE32(p, (uint32_t)(offset + delta));
        }
    } else if (strcmp(fourcc, "co64") == 0 && atomSize >= 16) {
        // 64-bit chunk offset box
        uint32_t count = (uint32_t(data[12]) << 24) | (uint32_t(data[13]) << 16) |
                         (uint32_t(data[14]) << 8) | uint32_t(data[15]);
        for (uint32_t i = 0; i < count && (16 + (i + 1) * 8) <= atomSize; i++) {
            uint8_t* p = data + 16 + i * 8;
            uint64_t offset = ReadBE64(p);
            WriteBE64(p, offset + delta);
        }
    } else {
        // Container atom — recurse into children
        // Skip header (8 bytes for regular, 16 for version+flags atoms)
        bool isContainer = (strcmp(fourcc, "moov") == 0 || strcmp(fourcc, "trak") == 0 ||
                            strcmp(fourcc, "mdia") == 0 || strcmp(fourcc, "minf") == 0 ||
                            strcmp(fourcc, "stbl") == 0 || strcmp(fourcc, "edts") == 0 ||
                            strcmp(fourcc, "dinf") == 0 || strcmp(fourcc, "udta") == 0);
        if (isContainer) {
            uint32_t offset = 8;
            while (offset + 8 <= atomSize) {
                uint32_t childSize = (uint32_t(data[offset]) << 24) | (uint32_t(data[offset+1]) << 16) |
                                     (uint32_t(data[offset+2]) << 8) | uint32_t(data[offset+3]);
                // Bail on malformed child sizes rather than over-reading.
                if (childSize < 8 || offset + childSize > atomSize) break;
                AdjustChunkOffsets(data + offset, childSize, delta);
                offset += childSize;
            }
        }
    }
}
|
||||
|
||||
// Streams exactly `count` bytes from `src` to `dst` through a small
// stack buffer. Returns false on a short read or short write; both
// streams are left positioned wherever the last I/O stopped.
static bool CopyBytes(FILE* src, FILE* dst, int64_t count) {
    uint8_t chunk[8192];
    int64_t remaining = count;
    while (remaining > 0) {
        const size_t want = (remaining < (int64_t)sizeof(chunk))
                                ? (size_t)remaining
                                : sizeof(chunk);
        const size_t got = fread(chunk, 1, want, src);
        if (got != want) return false;
        const size_t put = fwrite(chunk, 1, got, dst);
        if (put != got) return false;
        remaining -= (int64_t)got;
    }
    return true;
}
|
||||
|
||||
// Rewrites an MP4 so the moov atom precedes mdat ("faststart"), letting
// players start playback before the whole file has downloaded.
// If moov is already in front the file is copied verbatim (or left
// untouched when inputPath == outputPath). Returns true on success.
//
// NOTE(review): fseek/ftell take `long`, so on 32-bit builds positions
// >2GB would truncate — confirm clips cannot exceed that, or switch to
// fseeko/ftello.
// NOTE(review): when moov is AFTER mdat and inputPath == outputPath,
// the fopen(outputPath, "wb") below truncates the file while `input`
// still reads from it — callers appear to pass distinct paths; verify.
bool MoovFastStart(const std::string& inputPath, const std::string& outputPath) {
    FILE* input = fopen(inputPath.c_str(), "rb");
    if (!input) {
        LOGE("Cannot open input: %s", inputPath.c_str());
        return false;
    }

    // Scan for atoms
    // Top-level pass: record the fourcc, byte offset, and size of every
    // top-level box so moov/mdat can be located and reordered.
    struct Atom { char fourcc[5]; int64_t offset; int64_t size; };
    std::vector<Atom> atoms;

    fseek(input, 0, SEEK_END);
    int64_t fileSize = ftell(input);
    fseek(input, 0, SEEK_SET);

    int64_t pos = 0;
    while (pos < fileSize) {
        fseek(input, pos, SEEK_SET);
        // ReadBE32(FILE*) — big-endian reader defined earlier in this file.
        uint32_t size32 = ReadBE32(input);
        uint32_t fourcc_raw = ReadBE32(input);

        Atom atom;
        atom.fourcc[0] = (fourcc_raw >> 24) & 0xFF;
        atom.fourcc[1] = (fourcc_raw >> 16) & 0xFF;
        atom.fourcc[2] = (fourcc_raw >> 8) & 0xFF;
        atom.fourcc[3] = fourcc_raw & 0xFF;
        atom.fourcc[4] = 0;  // NUL-terminate for strcmp below
        atom.offset = pos;

        if (size32 == 1) {
            // 64-bit extended size
            uint8_t ext[8];
            if (fread(ext, 1, 8, input) != 8) break;
            atom.size = (int64_t)ReadBE64(ext);
        } else if (size32 == 0) {
            // size == 0 means "extends to end of file" per ISO BMFF
            atom.size = fileSize - pos;
        } else {
            atom.size = size32;
        }

        // A box can never be smaller than its own 8-byte header;
        // treat anything smaller as corruption and stop scanning.
        if (atom.size < 8) break;
        atoms.push_back(atom);
        pos += atom.size;
    }

    // Find moov and mdat
    int moovIdx = -1, mdatIdx = -1;
    for (int i = 0; i < (int)atoms.size(); i++) {
        if (strcmp(atoms[i].fourcc, "moov") == 0) moovIdx = i;
        if (strcmp(atoms[i].fourcc, "mdat") == 0) mdatIdx = i;
    }

    if (moovIdx < 0) {
        LOGE("No moov atom found");
        fclose(input);
        return false;
    }

    if (moovIdx < mdatIdx || mdatIdx < 0) {
        // moov already before mdat — just copy as-is
        LOGI("moov already at front, copying file");
        fclose(input);
        if (inputPath != outputPath) {
            FILE* in2 = fopen(inputPath.c_str(), "rb");
            FILE* out = fopen(outputPath.c_str(), "wb");
            if (!in2 || !out) {
                if (in2) fclose(in2);
                if (out) fclose(out);
                return false;
            }
            bool ok = CopyBytes(in2, out, fileSize);
            fclose(in2);
            fclose(out);
            return ok;
        }
        return true;
    }

    // Read moov into memory
    int64_t moovSize = atoms[moovIdx].size;
    std::vector<uint8_t> moovData(moovSize);
    fseek(input, atoms[moovIdx].offset, SEEK_SET);
    if (fread(moovData.data(), 1, moovSize, input) != (size_t)moovSize) {
        LOGE("Failed to read moov atom");
        fclose(input);
        return false;
    }

    // Adjust chunk offsets by moov size (since moov will be inserted before mdat)
    // stco/co64 entries point at absolute file offsets inside mdat; moving
    // moov in front of mdat shifts all of them forward by exactly moovSize.
    AdjustChunkOffsets(moovData.data(), moovData.size(), moovSize);

    // Write output: atoms before mdat + moov + mdat + atoms after moov
    FILE* output = fopen(outputPath.c_str(), "wb");
    if (!output) {
        LOGE("Cannot open output: %s", outputPath.c_str());
        fclose(input);
        return false;
    }

    // Write all atoms before mdat
    for (int i = 0; i < mdatIdx; i++) {
        fseek(input, atoms[i].offset, SEEK_SET);
        if (!CopyBytes(input, output, atoms[i].size)) {
            LOGE("Failed to copy pre-mdat atom");
            fclose(input);
            fclose(output);
            return false;
        }
    }

    // Write moov (with adjusted offsets)
    if (fwrite(moovData.data(), 1, moovSize, output) != (size_t)moovSize) {
        LOGE("Failed to write moov");
        fclose(input);
        fclose(output);
        return false;
    }

    // Write mdat and everything after (except moov which we already wrote)
    for (int i = mdatIdx; i < (int)atoms.size(); i++) {
        if (i == moovIdx) continue; // skip moov — already written
        fseek(input, atoms[i].offset, SEEK_SET);
        if (!CopyBytes(input, output, atoms[i].size)) {
            LOGE("Failed to copy post-moov atom");
            fclose(input);
            fclose(output);
            return false;
        }
    }

    fclose(input);
    fclose(output);
    LOGI("Faststart complete: moov moved to front");
    return true;
}
|
||||
12
app/src/main/cpp/faststart.h
Normal file
12
app/src/main/cpp/faststart.h
Normal file
@@ -0,0 +1,12 @@
|
||||
#pragma once
|
||||
|
||||
#include <string>
|
||||
|
||||
/**
|
||||
* Moves the moov atom from the end of an MP4 file to the beginning,
|
||||
* enabling instant playback in web browsers without buffering.
|
||||
*
|
||||
* inputPath and outputPath may be different files.
|
||||
* Returns true if moov was successfully relocated (or was already at front).
|
||||
*/
|
||||
bool MoovFastStart(const std::string& inputPath, const std::string& outputPath);
|
||||
@@ -15,6 +15,8 @@ static JavaVM* gJavaVM = nullptr;
|
||||
static jmethodID gOnStatsMethod = nullptr;
|
||||
static jmethodID gOnErrorMethod = nullptr;
|
||||
static jmethodID gOnBufferReleasedMethod = nullptr;
|
||||
static jmethodID gOnClipReadyMethod = nullptr;
|
||||
static jmethodID gOnCortexSegmentMethod = nullptr;
|
||||
|
||||
JNIEXPORT jint JNI_OnLoad(JavaVM* vm, void* reserved) {
|
||||
gJavaVM = vm;
|
||||
@@ -43,6 +45,38 @@ Java_com_omixlab_lckcontrol_streaming_NativeStreamingEngine_nativeCreate(
|
||||
gOnStatsMethod = env->GetMethodID(cls, "onNativeStats", "(JJII)V");
|
||||
gOnErrorMethod = env->GetMethodID(cls, "onNativeError", "(ILjava/lang/String;)V");
|
||||
gOnBufferReleasedMethod = env->GetMethodID(cls, "onNativeBufferReleased", "(I)V");
|
||||
gOnClipReadyMethod = env->GetMethodID(cls, "onNativeClipReady", "(Ljava/lang/String;)V");
|
||||
gOnCortexSegmentMethod = env->GetMethodID(cls, "onNativeCortexSegment", "(Ljava/lang/String;[B)V");
|
||||
|
||||
engine->SetCortexSegmentCallback([globalRef](const std::string& segPath,
|
||||
const uint8_t* keyframeData,
|
||||
uint32_t keyframeSize) {
|
||||
JNIEnv* env;
|
||||
if (gJavaVM->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_6) != JNI_OK) {
|
||||
if (gJavaVM->AttachCurrentThread(&env, nullptr) != JNI_OK) return;
|
||||
}
|
||||
if (gOnCortexSegmentMethod) {
|
||||
jstring jpath = env->NewStringUTF(segPath.c_str());
|
||||
jbyteArray jdata = env->NewByteArray(keyframeSize);
|
||||
env->SetByteArrayRegion(jdata, 0, keyframeSize,
|
||||
reinterpret_cast<const jbyte*>(keyframeData));
|
||||
env->CallVoidMethod(globalRef, gOnCortexSegmentMethod, jpath, jdata);
|
||||
env->DeleteLocalRef(jpath);
|
||||
env->DeleteLocalRef(jdata);
|
||||
}
|
||||
});
|
||||
|
||||
engine->SetClipReadyCallback([globalRef](const std::string& path) {
|
||||
JNIEnv* env;
|
||||
if (gJavaVM->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_6) != JNI_OK) {
|
||||
if (gJavaVM->AttachCurrentThread(&env, nullptr) != JNI_OK) return;
|
||||
}
|
||||
if (gOnClipReadyMethod) {
|
||||
jstring jpath = env->NewStringUTF(path.c_str());
|
||||
env->CallVoidMethod(globalRef, gOnClipReadyMethod, jpath);
|
||||
env->DeleteLocalRef(jpath);
|
||||
}
|
||||
});
|
||||
|
||||
engine->SetStatsCallback([globalRef](const StreamingStats& stats) {
|
||||
JNIEnv* env;
|
||||
@@ -241,4 +275,55 @@ Java_com_omixlab_lckcontrol_streaming_NativeStreamingEngine_nativeSetComposition
|
||||
engine->SetCompositionLayerEnabled(layerId, enabled == JNI_TRUE);
|
||||
}
|
||||
|
||||
// --- Clip recording ---
|
||||
|
||||
// JNI bridge: enables the native clip ring buffer on the engine whose
// address is packed into `ptr`. No-op if `ptr` is null/zero.
JNIEXPORT void JNICALL
Java_com_omixlab_lckcontrol_streaming_NativeStreamingEngine_nativeEnableClipRecording(
    JNIEnv* env, jobject thiz, jlong ptr, jint width, jint height) {
    auto* engine = reinterpret_cast<StreamingEngine*>(ptr);
    if (!engine) return;
    engine->EnableClipRecording(width, height);
}
|
||||
|
||||
// JNI bridge: flushes the buffered clip into `outputDir`.
// Returns JNI_TRUE on success, JNI_FALSE if the engine is null or the
// flush fails.
// NOTE(review): GetStringUTFChars can return nullptr on OOM — currently
// unchecked; confirm this path is acceptable.
JNIEXPORT jboolean JNICALL
Java_com_omixlab_lckcontrol_streaming_NativeStreamingEngine_nativeFlushClip(
    JNIEnv* env, jobject thiz, jlong ptr, jstring outputDir) {
    auto* engine = reinterpret_cast<StreamingEngine*>(ptr);
    if (!engine) return JNI_FALSE;

    const char* dir = env->GetStringUTFChars(outputDir, nullptr);
    bool result = engine->FlushClip(dir);
    env->ReleaseStringUTFChars(outputDir, dir);
    return result ? JNI_TRUE : JNI_FALSE;
}
|
||||
|
||||
// JNI bridge: stops clip recording and releases its buffers.
JNIEXPORT void JNICALL
Java_com_omixlab_lckcontrol_streaming_NativeStreamingEngine_nativeDisableClipRecording(
    JNIEnv* env, jobject thiz, jlong ptr) {
    auto* engine = reinterpret_cast<StreamingEngine*>(ptr);
    if (!engine) return;
    engine->DisableClipRecording();
}
|
||||
|
||||
// --- Cortex recording ---
|
||||
|
||||
// JNI bridge: starts a Cortex background-recording session writing .seg
// files into `sessionDir`, retaining roughly `maxMinutes` of gameplay.
// NOTE(review): GetStringUTFChars can return nullptr on OOM — unchecked.
JNIEXPORT void JNICALL
Java_com_omixlab_lckcontrol_streaming_NativeStreamingEngine_nativeEnableCortexRecording(
    JNIEnv* env, jobject thiz, jlong ptr, jstring sessionDir, jint maxMinutes) {
    auto* engine = reinterpret_cast<StreamingEngine*>(ptr);
    if (!engine) return;

    const char* dir = env->GetStringUTFChars(sessionDir, nullptr);
    engine->EnableCortexRecording(dir, maxMinutes);
    env->ReleaseStringUTFChars(sessionDir, dir);
}
|
||||
|
||||
// JNI bridge: stops the Cortex recording session on this engine.
JNIEXPORT void JNICALL
Java_com_omixlab_lckcontrol_streaming_NativeStreamingEngine_nativeDisableCortexRecording(
    JNIEnv* env, jobject thiz, jlong ptr) {
    auto* engine = reinterpret_cast<StreamingEngine*>(ptr);
    if (!engine) return;
    engine->DisableCortexRecording();
}
|
||||
|
||||
} // extern "C"
|
||||
|
||||
@@ -268,8 +268,8 @@ bool StreamingEngine::Start() {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (sinks.empty()) {
|
||||
LOGE("No destinations configured");
|
||||
if (sinks.empty() && !cortexRecordingEnabled) {
|
||||
LOGE("No destinations and cortex not enabled");
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -443,6 +443,12 @@ void StreamingEngine::EncoderThreadFunc() {
|
||||
// Cleanup
|
||||
LOGI("Encoder thread shutting down");
|
||||
|
||||
// Stop cortex recording if active
|
||||
if (cortexRecordingEnabled) {
|
||||
cortexRecorder.StopSession();
|
||||
cortexRecordingEnabled = false;
|
||||
}
|
||||
|
||||
ReleaseBlitResources();
|
||||
eglContext.DestroyPreviewSurface();
|
||||
hasPreview = false;
|
||||
@@ -610,6 +616,18 @@ void StreamingEngine::DrainVideoEncoder() {
|
||||
timestampMs, isKeyframe);
|
||||
}
|
||||
|
||||
// Feed clip recorder (skip codec config frames)
|
||||
if (clipRecordingEnabled && !(info.flags & AMEDIACODEC_BUFFER_FLAG_CODEC_CONFIG)) {
|
||||
clipRecorder.FeedVideoPacket(outputData + info.offset, info.size,
|
||||
info.presentationTimeUs, isKeyframe);
|
||||
}
|
||||
|
||||
// Feed cortex recorder (skip codec config frames)
|
||||
if (cortexRecordingEnabled && !(info.flags & AMEDIACODEC_BUFFER_FLAG_CODEC_CONFIG)) {
|
||||
cortexRecorder.FeedVideoPacket(outputData + info.offset, info.size,
|
||||
info.presentationTimeUs, isKeyframe);
|
||||
}
|
||||
|
||||
std::lock_guard<std::mutex> lock(statsMutex);
|
||||
statsVideoBytes += info.size;
|
||||
statsFrameCount++;
|
||||
@@ -624,6 +642,14 @@ void StreamingEngine::DrainVideoEncoder() {
|
||||
for (auto* sink : sinks) {
|
||||
sink->OnVideoFormatReady(configData + info.offset, info.size);
|
||||
}
|
||||
// Forward SPS/PPS to clip recorder
|
||||
if (clipRecordingEnabled) {
|
||||
clipRecorder.SetVideoFormat(configData + info.offset, info.size);
|
||||
}
|
||||
// Forward SPS/PPS to cortex recorder
|
||||
if (cortexRecordingEnabled) {
|
||||
cortexRecorder.SetVideoFormat(configData + info.offset, info.size);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -656,6 +682,18 @@ void StreamingEngine::DrainAudioEncoder() {
|
||||
sink->SendAudioPacket(outputData + info.offset, info.size, timestampMs);
|
||||
}
|
||||
|
||||
// Feed clip recorder
|
||||
if (clipRecordingEnabled) {
|
||||
clipRecorder.FeedAudioPacket(outputData + info.offset, info.size,
|
||||
info.presentationTimeUs);
|
||||
}
|
||||
|
||||
// Feed cortex recorder
|
||||
if (cortexRecordingEnabled) {
|
||||
cortexRecorder.FeedAudioPacket(outputData + info.offset, info.size,
|
||||
info.presentationTimeUs);
|
||||
}
|
||||
|
||||
std::lock_guard<std::mutex> lock(statsMutex);
|
||||
statsAudioBytes += info.size;
|
||||
}
|
||||
@@ -663,6 +701,22 @@ void StreamingEngine::DrainAudioEncoder() {
|
||||
|
||||
if (info.flags & AMEDIACODEC_BUFFER_FLAG_CODEC_CONFIG) {
|
||||
// AAC config — sinks handle audio format via Open()
|
||||
// Forward to clip recorder
|
||||
if (clipRecordingEnabled) {
|
||||
size_t outSize;
|
||||
uint8_t* configData = AMediaCodec_getOutputBuffer(audioEncoder, outputIndex, &outSize);
|
||||
if (configData) {
|
||||
clipRecorder.SetAudioFormat(configData + info.offset, info.size);
|
||||
}
|
||||
}
|
||||
// Forward to cortex recorder
|
||||
if (cortexRecordingEnabled) {
|
||||
size_t outSize;
|
||||
uint8_t* configData = AMediaCodec_getOutputBuffer(audioEncoder, outputIndex, &outSize);
|
||||
if (configData) {
|
||||
cortexRecorder.SetAudioFormat(configData + info.offset, info.size);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
AMediaCodec_releaseOutputBuffer(audioEncoder, outputIndex, false);
|
||||
@@ -888,3 +942,47 @@ void StreamingEngine::ProcessPendingLayerOps() {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// --- Clip recording ---
|
||||
|
||||
// Starts the in-memory clip recorder at the given video dimensions.
// Audio parameters come from the engine's current configuration. The
// enabled flag is set last so Drain* threads only feed a started recorder.
void StreamingEngine::EnableClipRecording(int w, int h) {
    clipRecorder.Configure(w, h, sampleRate, channels, audioBitrate);
    clipRecorder.Start();
    clipRecordingEnabled = true;
    LOGI("Clip recording enabled: %dx%d", w, h);
}
|
||||
|
||||
// Writes the currently buffered clip into `outputDir`.
// Returns false when clip recording is inactive or the flush fails.
bool StreamingEngine::FlushClip(const std::string& outputDir) {
    if (!clipRecordingEnabled) return false;
    return clipRecorder.FlushClip(outputDir);
}
|
||||
|
||||
// Stops clip recording. The flag is cleared BEFORE Stop() so encoder
// drain threads stop feeding packets into a recorder that is shutting down.
void StreamingEngine::DisableClipRecording() {
    clipRecordingEnabled = false;
    clipRecorder.Stop();
    LOGI("Clip recording disabled");
}
|
||||
|
||||
// Forwards the clip-ready callback to the recorder (invoked with the
// finished clip's file path).
void StreamingEngine::SetClipReadyCallback(ClipRecorder::ClipReadyCallback callback) {
    clipRecorder.SetClipReadyCallback(std::move(callback));
}
|
||||
|
||||
// --- Cortex recording ---
|
||||
|
||||
// Starts background Cortex recording into `sessionDir`, retaining about
// `maxMinutes` of gameplay as segment files.
// NOTE(review): uses the engine's current width/height members — confirm
// they are initialized before this is called (i.e. after create()).
void StreamingEngine::EnableCortexRecording(const std::string& sessionDir, int maxMinutes) {
    cortexRecorder.Configure(width, height, sampleRate, channels, audioBitrate);
    cortexRecorder.SetMaxDurationMinutes(maxMinutes);
    cortexRecorder.StartSession(sessionDir);
    cortexRecordingEnabled = true;
    LOGI("Cortex recording enabled: %s (%d min)", sessionDir.c_str(), maxMinutes);
}
|
||||
|
||||
// Stops the Cortex session. The flag is cleared first so drain threads
// stop feeding packets before the recorder tears down its files.
void StreamingEngine::DisableCortexRecording() {
    cortexRecordingEnabled = false;
    cortexRecorder.StopSession();
    LOGI("Cortex recording disabled");
}
|
||||
|
||||
// Forwards the per-segment callback to the recorder (invoked with the
// segment path and a keyframe used for thumbnail generation).
void StreamingEngine::SetCortexSegmentCallback(CortexRecorder::SegmentCallback cb) {
    cortexRecorder.SetSegmentCallback(std::move(cb));
}
|
||||
|
||||
@@ -3,6 +3,8 @@
|
||||
#include "egl_context.h"
|
||||
#include "composition_pipeline.h"
|
||||
#include "rtmp_sink.h"
|
||||
#include "clip_recorder.h"
|
||||
#include "cortex_recorder.h"
|
||||
|
||||
#include <media/NdkMediaCodec.h>
|
||||
#include <media/NdkMediaFormat.h>
|
||||
@@ -93,6 +95,17 @@ public:
|
||||
void UpdateCompositionLayerOpacity(int layerId, float opacity);
|
||||
void SetCompositionLayerEnabled(int layerId, bool enabled);
|
||||
|
||||
// Clip recording
|
||||
void EnableClipRecording(int width, int height);
|
||||
bool FlushClip(const std::string& outputDir);
|
||||
void DisableClipRecording();
|
||||
void SetClipReadyCallback(ClipRecorder::ClipReadyCallback callback);
|
||||
|
||||
// Cortex recording
|
||||
void EnableCortexRecording(const std::string& sessionDir, int maxMinutes);
|
||||
void DisableCortexRecording();
|
||||
void SetCortexSegmentCallback(CortexRecorder::SegmentCallback cb);
|
||||
|
||||
private:
|
||||
// Encoder thread
|
||||
void EncoderThreadFunc();
|
||||
@@ -219,4 +232,12 @@ private:
|
||||
bool InitAudioEncoder();
|
||||
bool InitBlitResources();
|
||||
void ReleaseBlitResources();
|
||||
|
||||
// Clip recording
|
||||
ClipRecorder clipRecorder;
|
||||
bool clipRecordingEnabled = false;
|
||||
|
||||
// Cortex recording
|
||||
CortexRecorder cortexRecorder;
|
||||
bool cortexRecordingEnabled = false;
|
||||
};
|
||||
|
||||
337
app/src/main/java/com/omixlab/lckcontrol/cortex/CortexManager.kt
Normal file
337
app/src/main/java/com/omixlab/lckcontrol/cortex/CortexManager.kt
Normal file
@@ -0,0 +1,337 @@
|
||||
package com.omixlab.lckcontrol.cortex
|
||||
|
||||
import android.content.Context
|
||||
import android.graphics.Bitmap
|
||||
import android.hardware.HardwareBuffer
|
||||
import android.media.MediaCodec
|
||||
import android.media.MediaCodecInfo
|
||||
import android.media.MediaFormat
|
||||
import android.media.MediaMetadataRetriever
|
||||
import android.media.MediaMuxer
|
||||
import android.util.Log
|
||||
import com.omixlab.lckcontrol.streaming.NativeStreamingEngine
|
||||
import dagger.hilt.android.qualifiers.ApplicationContext
|
||||
import kotlinx.coroutines.CoroutineScope
|
||||
import kotlinx.coroutines.Dispatchers
|
||||
import kotlinx.coroutines.SupervisorJob
|
||||
import kotlinx.coroutines.flow.MutableStateFlow
|
||||
import kotlinx.coroutines.flow.StateFlow
|
||||
import kotlinx.coroutines.flow.asStateFlow
|
||||
import kotlinx.coroutines.launch
|
||||
import java.io.File
|
||||
import java.io.FileOutputStream
|
||||
import java.util.UUID
|
||||
import javax.inject.Inject
|
||||
import javax.inject.Singleton
|
||||
|
||||
/**
 * Orchestrates background gameplay ("Cortex") recording.
 *
 * Owns an optional cortex-only [NativeStreamingEngine] (zero sinks) used
 * while NOT live-streaming, and hands recording over to the streaming
 * engine when a live stream starts. Exposes recorded sessions, recording
 * state, and disk usage as StateFlows for the UI.
 */
@Singleton
class CortexManager @Inject constructor(
    @ApplicationContext private val context: Context,
    private val cortexPreferences: CortexPreferences,
) {
    companion object {
        private const val TAG = "CortexManager"
        // Subdirectory of filesDir where all sessions live.
        private const val CORTEX_DIR = "cortex"
        private const val VIDEO_BITRATE = 8_000_000
        private const val AUDIO_BITRATE = 128_000
        private const val SAMPLE_RATE = 48_000
        private const val CHANNELS = 2
        private const val KEYFRAME_INTERVAL = 2
    }

    // Background work (thumbnailing, session scans) runs on IO dispatcher.
    private val scope = CoroutineScope(SupervisorJob() + Dispatchers.IO)

    // Non-null only while the cortex-only engine (no live stream) is running.
    private var cortexEngine: NativeStreamingEngine? = null
    // Lazily assigned UUID naming the on-disk session directory.
    private var currentSessionId: String? = null
    // Dimensions of the currently registered texture pool; 0 when none.
    private var poolWidth = 0
    private var poolHeight = 0

    private val _sessions = MutableStateFlow<List<CortexSession>>(emptyList())
    val sessions: StateFlow<List<CortexSession>> = _sessions.asStateFlow()

    private val _isRecording = MutableStateFlow(false)
    val isRecording: StateFlow<Boolean> = _isRecording.asStateFlow()

    private val _storageUsedBytes = MutableStateFlow(0L)
    val storageUsedBytes: StateFlow<Long> = _storageUsedBytes.asStateFlow()

    // Set by the frame producer so released buffer indices flow back to it.
    var onBufferReleased: ((Int) -> Unit)? = null

    val hasCortexEngine: Boolean get() = cortexEngine != null

    private fun cortexBaseDir(): File = File(context.filesDir, CORTEX_DIR)

    /** Called when a frame source becomes available; starts recording if enabled. */
    fun onTexturePoolRegistered(width: Int, height: Int) {
        poolWidth = width
        poolHeight = height
        if (cortexPreferences.isEnabled()) {
            startCortexOnlyEngine()
        }
    }

    /** Called when the frame source goes away; tears down the cortex-only engine. */
    fun onTexturePoolUnregistered() {
        stopCortexEngine()
        poolWidth = 0
        poolHeight = 0
    }

    /**
     * Handover when a live stream starts: the cortex-only engine is stopped
     * and cortex recording continues on the streaming engine instead.
     */
    fun onStreamingStarting(engine: NativeStreamingEngine) {
        // Stop cortex-only engine — streaming engine takes over
        stopCortexEngine()

        // Enable cortex recording on the streaming engine
        if (cortexPreferences.isEnabled()) {
            val sessionDir = getOrCreateSessionDir()
            engine.enableCortexRecording(sessionDir.absolutePath, cortexPreferences.getMaxMinutes())
            engine.onCortexSegment = { segPath, keyframeData ->
                scope.launch { handleSegmentReady(segPath, keyframeData) }
            }
            _isRecording.value = true
            Log.i(TAG, "Cortex enabled on streaming engine: ${sessionDir.absolutePath}")
        }
    }

    /** Reverse handover: resume the cortex-only engine after the stream ends. */
    fun onStreamingStopped() {
        // Restart cortex-only engine if we have a texture pool and cortex is enabled
        if (poolWidth > 0 && poolHeight > 0 && cortexPreferences.isEnabled()) {
            startCortexOnlyEngine()
        }
    }

    // Frame/audio forwarding — no-ops when the cortex-only engine is absent
    // (during a live stream the StreamingManager feeds the streaming engine).
    fun submitVideoFrame(buffer: HardwareBuffer, timestampNs: Long, fenceFd: Int, bufferIndex: Int) {
        cortexEngine?.submitVideoFrame(buffer, timestampNs, fenceFd, bufferIndex)
    }

    fun submitAudioFrame(pcmData: ByteArray, timestampNs: Long) {
        cortexEngine?.submitAudioFrame(pcmData, timestampNs)
    }

    /** Deletes a single session's directory and refreshes the session list. */
    // NOTE(review): does not guard against deleting the ACTIVE session,
    // unlike deleteAllSessions — confirm whether the UI prevents this.
    fun deleteSession(sessionId: String) {
        val dir = File(cortexBaseDir(), sessionId)
        if (dir.exists()) {
            dir.deleteRecursively()
        }
        refreshSessions()
    }

    fun deleteAllSessions() {
        // Don't delete the current active session
        val activeId = currentSessionId
        cortexBaseDir().listFiles()?.forEach { dir ->
            if (dir.isDirectory && dir.name != activeId) {
                dir.deleteRecursively()
            }
        }
        refreshSessions()
    }

    /** Rescans disk and republishes sessions + total storage use (async). */
    fun refreshSessions() {
        scope.launch {
            val baseDir = cortexBaseDir()
            if (!baseDir.exists()) {
                _sessions.value = emptyList()
                _storageUsedBytes.value = 0L
                return@launch
            }

            var totalSize = 0L
            val sessionList = baseDir.listFiles()
                ?.filter { it.isDirectory }
                ?.mapNotNull { dir ->
                    // A directory counts as a session only if it has segments.
                    val segFiles = dir.listFiles { f -> f.name.endsWith(".seg") } ?: emptyArray()
                    if (segFiles.isEmpty()) return@mapNotNull null

                    val thumbnails = dir.listFiles { f -> f.name.endsWith(".jpg") }
                        ?.sortedBy { it.name }
                        ?.map { it.absolutePath }
                        ?: emptyList()

                    val size = dir.listFiles()?.sumOf { it.length() } ?: 0L
                    totalSize += size

                    CortexSession(
                        sessionId = dir.name,
                        directory = dir,
                        segmentCount = segFiles.size,
                        thumbnailPaths = thumbnails,
                        totalSizeBytes = size,
                        startTime = segFiles.minOf { it.lastModified() },
                    )
                }
                ?.sortedByDescending { it.startTime }
                ?: emptyList()

            _sessions.value = sessionList
            _storageUsedBytes.value = totalSize
        }
    }

    /** Creates and starts a sink-less engine whose only output is Cortex segments. */
    private fun startCortexOnlyEngine() {
        if (cortexEngine != null) return
        if (poolWidth <= 0 || poolHeight <= 0) return

        Log.i(TAG, "Starting cortex-only engine: ${poolWidth}x${poolHeight}")

        val eng = NativeStreamingEngine()
        eng.create(
            width = poolWidth,
            height = poolHeight,
            videoBitrate = VIDEO_BITRATE,
            audioBitrate = AUDIO_BITRATE,
            sampleRate = SAMPLE_RATE,
            channels = CHANNELS,
            keyframeInterval = KEYFRAME_INTERVAL,
        )

        // No addDestination calls — zero sinks

        eng.onBufferReleased = { index ->
            onBufferReleased?.invoke(index)
        }

        val sessionDir = getOrCreateSessionDir()
        eng.enableCortexRecording(sessionDir.absolutePath, cortexPreferences.getMaxMinutes())

        eng.onCortexSegment = { segPath, keyframeData ->
            scope.launch { handleSegmentReady(segPath, keyframeData) }
        }

        if (eng.start()) {
            cortexEngine = eng
            _isRecording.value = true
            Log.i(TAG, "Cortex-only engine started")
        } else {
            eng.destroy()
            Log.e(TAG, "Failed to start cortex-only engine")
        }
    }

    /** Stops and destroys the cortex-only engine, if any, then rescans disk. */
    private fun stopCortexEngine() {
        val eng = cortexEngine ?: return
        Log.i(TAG, "Stopping cortex-only engine")
        eng.disableCortexRecording()
        eng.stop()
        eng.destroy()
        cortexEngine = null
        _isRecording.value = false
        refreshSessions()
    }

    // Session directory is reused across engine handovers within one app run
    // (currentSessionId is only assigned once).
    private fun getOrCreateSessionDir(): File {
        val id = currentSessionId ?: UUID.randomUUID().toString().also { currentSessionId = it }
        val dir = File(cortexBaseDir(), id)
        dir.mkdirs()
        return dir
    }

    /** Invoked (on scope) each time native code finishes a ~30s segment. */
    private suspend fun handleSegmentReady(segPath: String, keyframeData: ByteArray) {
        Log.i(TAG, "Segment ready: $segPath")
        generateThumbnail(segPath, keyframeData)
        refreshSessions()
    }

    /**
     * Builds a JPEG thumbnail for a segment by muxing its keyframe into a
     * temporary single-frame MP4 and decoding it via MediaMetadataRetriever.
     * Any failure is logged and swallowed — thumbnails are best-effort.
     */
    private fun generateThumbnail(segPath: String, keyframeData: ByteArray) {
        try {
            val segFile = File(segPath)
            val segName = segFile.nameWithoutExtension // e.g. seg_000000
            val segIndex = segName.removePrefix("seg_")
            val thumbFile = File(segFile.parentFile, "thumb_$segIndex.jpg")

            // Mux the single keyframe into a temp MP4 so MediaMetadataRetriever can decode it
            val tempMp4 = File(segFile.parentFile, "thumb_temp_$segIndex.mp4")
            try {
                val muxer = MediaMuxer(tempMp4.absolutePath, MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4)

                // NOTE(review): poolWidth/poolHeight may be 0 here if the pool was
                // unregistered while a segment callback was in flight — confirm.
                val format = MediaFormat.createVideoFormat(MediaFormat.MIMETYPE_VIDEO_AVC, poolWidth, poolHeight)

                // Parse SPS/PPS from the .seg file header
                // For now, use the keyframe data directly — the codec config should be set on the format
                // We need to read the SPS/PPS from the segment file
                val spsPps = readSpsPpsFromSegment(segPath)
                if (spsPps != null) {
                    // Split SPS/PPS (Annex-B: [00 00 00 01 SPS] [00 00 00 01 PPS])
                    val startCode = byteArrayOf(0, 0, 0, 1)  // NOTE(review): unused — dead local
                    var ppsOffset = -1
                    for (i in 4 until spsPps.size - 3) {
                        if (spsPps[i] == 0.toByte() && spsPps[i + 1] == 0.toByte() &&
                            spsPps[i + 2] == 0.toByte() && spsPps[i + 3] == 1.toByte()
                        ) {
                            ppsOffset = i
                            break
                        }
                    }
                    if (ppsOffset > 0) {
                        format.setByteBuffer("csd-0", java.nio.ByteBuffer.wrap(spsPps, 0, ppsOffset))
                        format.setByteBuffer("csd-1", java.nio.ByteBuffer.wrap(spsPps, ppsOffset, spsPps.size - ppsOffset))
                    } else {
                        // No second start code found — pass everything as csd-0.
                        format.setByteBuffer("csd-0", java.nio.ByteBuffer.wrap(spsPps))
                    }
                }

                val trackIndex = muxer.addTrack(format)
                muxer.start()

                val bufferInfo = MediaCodec.BufferInfo().apply {
                    offset = 0
                    size = keyframeData.size
                    presentationTimeUs = 0
                    flags = MediaCodec.BUFFER_FLAG_KEY_FRAME
                }
                muxer.writeSampleData(trackIndex, java.nio.ByteBuffer.wrap(keyframeData), bufferInfo)

                muxer.stop()
                muxer.release()

                // Extract frame with MediaMetadataRetriever
                val retriever = MediaMetadataRetriever()
                retriever.setDataSource(tempMp4.absolutePath)
                val bitmap = retriever.getFrameAtTime(0, MediaMetadataRetriever.OPTION_CLOSEST_SYNC)
                retriever.release()

                if (bitmap != null) {
                    FileOutputStream(thumbFile).use { fos ->
                        bitmap.compress(Bitmap.CompressFormat.JPEG, 80, fos)
                    }
                    bitmap.recycle()
                    Log.i(TAG, "Thumbnail generated: ${thumbFile.absolutePath}")
                }
            } finally {
                tempMp4.delete()
            }
        } catch (e: Exception) {
            Log.e(TAG, "Failed to generate thumbnail for $segPath", e)
        }
    }

    /**
     * Reads the Annex-B SPS/PPS blob from a .seg header. Returns null on
     * any error or implausible size (> 1024 bytes).
     *
     * Header layout assumed little-endian:
     *   magic(4) + version(4) + width(4) + height(4) + spsPpsSize(4) + spsPps.
     * NOTE(review): the initial Integer.reverseBytes(raf.readInt()) result is
     * discarded (the .let re-reads the bytes manually); the raf is not closed
     * if an exception fires mid-read (caught, but the handle leaks until GC);
     * and raf.read(data)'s return count is unchecked — flagging, not fixing,
     * since the .seg writer is outside this view.
     */
    private fun readSpsPpsFromSegment(segPath: String): ByteArray? {
        try {
            val file = File(segPath)
            val raf = java.io.RandomAccessFile(file, "r")
            // Magic(4) + version(4) + width(4) + height(4) = 16 bytes
            raf.seek(16)
            val spsPpsSize = Integer.reverseBytes(raf.readInt()).let {
                // The file is written in native byte order (little-endian on ARM)
                // RandomAccessFile.readInt() reads big-endian, so we need to handle this
                // Actually, our writeVal writes raw bytes, so we should read raw bytes too
                raf.seek(16)
                val buf = ByteArray(4)
                raf.read(buf)
                (buf[0].toInt() and 0xFF) or
                    ((buf[1].toInt() and 0xFF) shl 8) or
                    ((buf[2].toInt() and 0xFF) shl 16) or
                    ((buf[3].toInt() and 0xFF) shl 24)
            }
            if (spsPpsSize <= 0 || spsPpsSize > 1024) {
                raf.close()
                return null
            }
            val data = ByteArray(spsPpsSize)
            raf.read(data)
            raf.close()
            return data
        } catch (e: Exception) {
            Log.e(TAG, "Failed to read SPS/PPS from segment", e)
            return null
        }
    }
}
|
||||
@@ -0,0 +1,32 @@
|
||||
package com.omixlab.lckcontrol.cortex
|
||||
|
||||
import android.content.Context
|
||||
import android.content.SharedPreferences
|
||||
import dagger.hilt.android.qualifiers.ApplicationContext
|
||||
import javax.inject.Inject
|
||||
import javax.inject.Singleton
|
||||
|
||||
/**
 * Persists the Cortex feature's user settings: the enable flag and the
 * retention window in minutes. Backed by a private SharedPreferences file.
 */
@Singleton
class CortexPreferences @Inject constructor(
    @ApplicationContext context: Context,
) {
    private companion object {
        const val KEY_ENABLED = "cortex_enabled"
        const val KEY_MAX_MINUTES = "cortex_max_minutes"
    }

    private val prefs: SharedPreferences =
        context.getSharedPreferences("lck_cortex_prefs", Context.MODE_PRIVATE)

    /** Whether background recording is turned on (defaults to off). */
    fun isEnabled(): Boolean {
        return prefs.getBoolean(KEY_ENABLED, false)
    }

    fun setEnabled(enabled: Boolean) {
        val editor = prefs.edit()
        editor.putBoolean(KEY_ENABLED, enabled)
        editor.apply()
    }

    /** Minutes of gameplay to retain (defaults to 10). */
    fun getMaxMinutes(): Int {
        return prefs.getInt(KEY_MAX_MINUTES, 10)
    }

    fun setMaxMinutes(minutes: Int) {
        val editor = prefs.edit()
        editor.putInt(KEY_MAX_MINUTES, minutes)
        editor.apply()
    }
}
|
||||
@@ -0,0 +1,12 @@
|
||||
package com.omixlab.lckcontrol.cortex
|
||||
|
||||
import java.io.File
|
||||
|
||||
/**
 * Immutable snapshot of one recorded Cortex session on disk, as scanned
 * by CortexManager.refreshSessions().
 *
 * @property sessionId directory name under the cortex base dir (a UUID)
 * @property directory the session's directory on disk
 * @property segmentCount number of .seg files present
 * @property thumbnailPaths absolute paths of per-segment .jpg thumbnails, name-sorted
 * @property totalSizeBytes sum of all file sizes in the directory
 * @property startTime earliest segment lastModified() timestamp (epoch millis)
 */
data class CortexSession(
    val sessionId: String,
    val directory: File,
    val segmentCount: Int,
    val thumbnailPaths: List<String>,
    val totalSizeBytes: Long,
    val startTime: Long,
)
|
||||
@@ -23,6 +23,8 @@ class NativeStreamingEngine {
|
||||
var onStats: ((StreamingStats) -> Unit)? = null
|
||||
var onError: ((Int, String) -> Unit)? = null
|
||||
var onBufferReleased: ((Int) -> Unit)? = null
|
||||
var onClipReady: ((String) -> Unit)? = null
|
||||
var onCortexSegment: ((segPath: String, keyframeData: ByteArray) -> Unit)? = null
|
||||
|
||||
fun create(
|
||||
width: Int,
|
||||
@@ -124,6 +126,33 @@ class NativeStreamingEngine {
|
||||
nativeSetCompositionLayerEnabled(nativePtr, layerId, enabled)
|
||||
}
|
||||
|
||||
// Clip recording
|
||||
/** Enables the native clip ring buffer at the given dimensions. No-op after destroy(). */
fun enableClipRecording(width: Int, height: Int) {
    if (nativePtr == 0L) return
    nativeEnableClipRecording(nativePtr, width, height)
}
|
||||
|
||||
/** Writes the buffered clip to [outputDir]; false if destroyed or the native flush fails. */
fun flushClip(outputDir: String): Boolean {
    if (nativePtr == 0L) return false
    return nativeFlushClip(nativePtr, outputDir)
}
|
||||
|
||||
/** Stops native clip recording. No-op after destroy(). */
fun disableClipRecording() {
    if (nativePtr == 0L) return
    nativeDisableClipRecording(nativePtr)
}
|
||||
|
||||
// Cortex recording
|
||||
/** Starts Cortex segment recording into [sessionDir], keeping ~[maxMinutes] of gameplay. */
fun enableCortexRecording(sessionDir: String, maxMinutes: Int) {
    if (nativePtr == 0L) return
    nativeEnableCortexRecording(nativePtr, sessionDir, maxMinutes)
}
|
||||
|
||||
/** Stops Cortex segment recording. No-op after destroy(). */
fun disableCortexRecording() {
    if (nativePtr == 0L) return
    nativeDisableCortexRecording(nativePtr)
}
|
||||
|
||||
// Called from native code (JNI callbacks)
|
||||
@Suppress("unused")
|
||||
private fun onNativeStats(videoBitrate: Long, audioBitrate: Long, fps: Int, droppedFrames: Int) {
|
||||
@@ -141,6 +170,18 @@ class NativeStreamingEngine {
|
||||
onBufferReleased?.invoke(bufferIndex)
|
||||
}
|
||||
|
||||
// Called from native code (JNI) when a flushed clip file is finalized.
@Suppress("unused")
private fun onNativeClipReady(path: String) {
    Log.i(TAG, "Clip ready: $path")
    onClipReady?.invoke(path)
}
|
||||
|
||||
@Suppress("unused")
|
||||
private fun onNativeCortexSegment(segPath: String, keyframeData: ByteArray) {
|
||||
Log.i(TAG, "Cortex segment: $segPath (${keyframeData.size} bytes keyframe)")
|
||||
onCortexSegment?.invoke(segPath, keyframeData)
|
||||
}
|
||||
|
||||
// Native methods
|
||||
private external fun nativeCreate(
|
||||
width: Int, height: Int,
|
||||
@@ -174,4 +215,13 @@ class NativeStreamingEngine {
|
||||
)
|
||||
private external fun nativeUpdateCompositionLayerOpacity(ptr: Long, layerId: Int, opacity: Float)
|
||||
private external fun nativeSetCompositionLayerEnabled(ptr: Long, layerId: Int, enabled: Boolean)
|
||||
|
||||
// Clip recording
|
||||
private external fun nativeEnableClipRecording(ptr: Long, width: Int, height: Int)
|
||||
private external fun nativeFlushClip(ptr: Long, outputDir: String): Boolean
|
||||
private external fun nativeDisableClipRecording(ptr: Long)
|
||||
|
||||
// Cortex recording
|
||||
private external fun nativeEnableCortexRecording(ptr: Long, sessionDir: String, maxMinutes: Int)
|
||||
private external fun nativeDisableCortexRecording(ptr: Long)
|
||||
}
|
||||
|
||||
@@ -1,14 +1,26 @@
|
||||
package com.omixlab.lckcontrol.streaming
|
||||
|
||||
import android.content.Context
|
||||
import android.graphics.Bitmap
|
||||
import android.hardware.HardwareBuffer
|
||||
import android.util.Log
|
||||
import android.view.Surface
|
||||
import com.omixlab.lckcontrol.cortex.CortexManager
|
||||
import com.omixlab.lckcontrol.data.remote.LckApiService
|
||||
import com.omixlab.lckcontrol.shared.StreamPlan
|
||||
import com.omixlab.lckcontrol.shared.StreamingConfig
|
||||
import dagger.hilt.android.qualifiers.ApplicationContext
|
||||
import kotlinx.coroutines.CoroutineScope
|
||||
import kotlinx.coroutines.Dispatchers
|
||||
import kotlinx.coroutines.SupervisorJob
|
||||
import kotlinx.coroutines.launch
|
||||
import kotlinx.coroutines.flow.MutableStateFlow
|
||||
import kotlinx.coroutines.flow.StateFlow
|
||||
import kotlinx.coroutines.flow.asStateFlow
|
||||
import okhttp3.MediaType.Companion.toMediaType
|
||||
import okhttp3.MultipartBody
|
||||
import okhttp3.RequestBody.Companion.asRequestBody
|
||||
import java.io.File
|
||||
import java.nio.ByteBuffer
|
||||
import javax.inject.Inject
|
||||
import javax.inject.Singleton
|
||||
@@ -23,10 +35,16 @@ enum class StreamingState {
|
||||
* Stream keys and RTMP URLs stay within the app process — never exposed via AIDL.
|
||||
*/
|
||||
@Singleton
|
||||
class StreamingManager @Inject constructor() {
|
||||
class StreamingManager @Inject constructor(
|
||||
@ApplicationContext private val context: Context,
|
||||
private val apiService: LckApiService,
|
||||
private val cortexManager: CortexManager,
|
||||
) {
|
||||
|
||||
companion object {
|
||||
private const val TAG = "StreamingManager"
|
||||
private const val CLIP_INITIAL_DELAY_MS = 10_000L
|
||||
private const val CLIP_INTERVAL_MS = 120_000L
|
||||
}
|
||||
|
||||
private var engine: NativeStreamingEngine? = null
|
||||
@@ -43,6 +61,10 @@ class StreamingManager @Inject constructor() {
|
||||
private val _error = MutableStateFlow<String?>(null)
|
||||
val error: StateFlow<String?> = _error.asStateFlow()
|
||||
|
||||
private var clipTimer: java.util.Timer? = null
|
||||
private var currentPlanId: String? = null
|
||||
private val clipScope = CoroutineScope(SupervisorJob() + Dispatchers.IO)
|
||||
|
||||
/**
|
||||
* Start streaming for a plan with APP_STREAMING execution mode.
|
||||
* RTMP URLs are constructed internally from the plan's destinations.
|
||||
@@ -108,6 +130,24 @@ class StreamingManager @Inject constructor() {
|
||||
engine = eng
|
||||
_state.value = StreamingState.LIVE
|
||||
Log.i(TAG, "Streaming started with ${destinations.size} destinations")
|
||||
|
||||
// Enable cortex recording on the streaming engine
|
||||
cortexManager.onStreamingStarting(eng)
|
||||
|
||||
// Start clip recording
|
||||
currentPlanId = plan.planId
|
||||
val clipsDir = File(context.cacheDir, "clips").apply { mkdirs() }
|
||||
eng.enableClipRecording(actualWidth, actualHeight)
|
||||
eng.onClipReady = { path ->
|
||||
clipScope.launch { uploadPreviewClip(path) }
|
||||
}
|
||||
clipTimer = java.util.Timer().apply {
|
||||
schedule(object : java.util.TimerTask() {
|
||||
override fun run() {
|
||||
engine?.flushClip(clipsDir.absolutePath)
|
||||
}
|
||||
}, CLIP_INITIAL_DELAY_MS, CLIP_INTERVAL_MS)
|
||||
}
|
||||
} else {
|
||||
eng.destroy()
|
||||
_error.value = "Failed to start streaming engine"
|
||||
@@ -130,9 +170,12 @@ class StreamingManager @Inject constructor() {
|
||||
texturePoolWidth = width
|
||||
texturePoolHeight = height
|
||||
Log.d(TAG, "Texture pool registered: ${buffers.size} buffers, ${width}x${height}")
|
||||
cortexManager.onBufferReleased = { idx -> onBufferReleased?.invoke(idx) }
|
||||
cortexManager.onTexturePoolRegistered(width, height)
|
||||
}
|
||||
|
||||
fun unregisterTexturePool() {
|
||||
cortexManager.onTexturePoolUnregistered()
|
||||
texturePoolBuffers = null
|
||||
texturePoolWidth = 0
|
||||
texturePoolHeight = 0
|
||||
@@ -156,11 +199,13 @@ class StreamingManager @Inject constructor() {
|
||||
return
|
||||
}
|
||||
val eng = engine
|
||||
if (eng == null) {
|
||||
if (videoFrameCount++ % 30 == 0) Log.w(TAG, "submitVideoFrame: engine is null (state=${_state.value})")
|
||||
return
|
||||
if (eng != null) {
|
||||
eng.submitVideoFrame(buffers[bufferIndex], timestampNs, fenceFd, bufferIndex)
|
||||
} else if (cortexManager.hasCortexEngine) {
|
||||
cortexManager.submitVideoFrame(buffers[bufferIndex], timestampNs, fenceFd, bufferIndex)
|
||||
} else {
|
||||
onBufferReleased?.invoke(bufferIndex)
|
||||
}
|
||||
eng.submitVideoFrame(buffers[bufferIndex], timestampNs, fenceFd, bufferIndex)
|
||||
if (++videoFrameCount % 30 == 0) {
|
||||
Log.d(TAG, "submitVideoFrame: forwarded frame #$videoFrameCount idx=$bufferIndex")
|
||||
}
|
||||
@@ -168,7 +213,12 @@ class StreamingManager @Inject constructor() {
|
||||
|
||||
/** Forward audio PCM from the game to the native engine. */
|
||||
fun submitAudioFrame(pcmData: ByteArray, timestampNs: Long) {
|
||||
engine?.submitAudioFrame(pcmData, timestampNs)
|
||||
val eng = engine
|
||||
if (eng != null) {
|
||||
eng.submitAudioFrame(pcmData, timestampNs)
|
||||
} else if (cortexManager.hasCortexEngine) {
|
||||
cortexManager.submitAudioFrame(pcmData, timestampNs)
|
||||
}
|
||||
}
|
||||
|
||||
/** Stop streaming and release all resources. */
|
||||
@@ -180,12 +230,21 @@ class StreamingManager @Inject constructor() {
|
||||
|
||||
_state.value = StreamingState.STOPPING
|
||||
|
||||
// Stop clip recording
|
||||
clipTimer?.cancel()
|
||||
clipTimer = null
|
||||
engine?.disableClipRecording()
|
||||
currentPlanId = null
|
||||
|
||||
engine?.let { eng ->
|
||||
eng.stop()
|
||||
eng.destroy()
|
||||
}
|
||||
engine = null
|
||||
|
||||
// Restart cortex-only engine if game is still connected
|
||||
cortexManager.onStreamingStopped()
|
||||
|
||||
_state.value = StreamingState.IDLE
|
||||
_stats.value = StreamingStats()
|
||||
Log.i(TAG, "Streaming stopped")
|
||||
@@ -238,6 +297,22 @@ class StreamingManager @Inject constructor() {
|
||||
engine?.setCompositionLayerEnabled(layerId, enabled)
|
||||
}
|
||||
|
||||
private suspend fun uploadPreviewClip(clipPath: String) {
|
||||
try {
|
||||
val file = File(clipPath)
|
||||
if (!file.exists() || file.length() == 0L) return
|
||||
val planId = currentPlanId ?: return
|
||||
|
||||
val body = file.asRequestBody("video/mp4".toMediaType())
|
||||
val part = MultipartBody.Part.createFormData("preview", file.name, body)
|
||||
apiService.uploadPreview(planId, part)
|
||||
Log.i(TAG, "Preview clip uploaded for plan $planId")
|
||||
file.delete()
|
||||
} catch (e: Exception) {
|
||||
Log.e(TAG, "Failed to upload preview clip", e)
|
||||
}
|
||||
}
|
||||
|
||||
private fun bitmapToRgba(bitmap: Bitmap): ByteArray {
|
||||
val argbBitmap = if (bitmap.config != Bitmap.Config.ARGB_8888) {
|
||||
bitmap.copy(Bitmap.Config.ARGB_8888, false)
|
||||
|
||||
@@ -0,0 +1,293 @@
|
||||
package com.omixlab.lckcontrol.ui.cortex
|
||||
|
||||
import androidx.compose.animation.core.RepeatMode
|
||||
import androidx.compose.animation.core.animateFloat
|
||||
import androidx.compose.animation.core.infiniteRepeatable
|
||||
import androidx.compose.animation.core.rememberInfiniteTransition
|
||||
import androidx.compose.animation.core.tween
|
||||
import androidx.compose.foundation.layout.Arrangement
|
||||
import androidx.compose.foundation.layout.Column
|
||||
import androidx.compose.foundation.layout.Row
|
||||
import androidx.compose.foundation.layout.Spacer
|
||||
import androidx.compose.foundation.layout.fillMaxSize
|
||||
import androidx.compose.foundation.layout.fillMaxWidth
|
||||
import androidx.compose.foundation.layout.height
|
||||
import androidx.compose.foundation.layout.padding
|
||||
import androidx.compose.foundation.layout.size
|
||||
import androidx.compose.foundation.layout.width
|
||||
import androidx.compose.foundation.lazy.LazyColumn
|
||||
import androidx.compose.foundation.lazy.LazyRow
|
||||
import androidx.compose.foundation.lazy.items
|
||||
import androidx.compose.foundation.shape.RoundedCornerShape
|
||||
import androidx.compose.material.icons.Icons
|
||||
import androidx.compose.material.icons.filled.Circle
|
||||
import androidx.compose.material.icons.filled.Delete
|
||||
import androidx.compose.material.icons.filled.DeleteSweep
|
||||
import androidx.compose.material3.Card
|
||||
import androidx.compose.material3.CardDefaults
|
||||
import androidx.compose.material3.ExperimentalMaterial3Api
|
||||
import androidx.compose.material3.FilterChip
|
||||
import androidx.compose.material3.Icon
|
||||
import androidx.compose.material3.IconButton
|
||||
import androidx.compose.material3.LinearProgressIndicator
|
||||
import androidx.compose.material3.MaterialTheme
|
||||
import androidx.compose.material3.Scaffold
|
||||
import androidx.compose.material3.Switch
|
||||
import androidx.compose.material3.Text
|
||||
import androidx.compose.material3.TextButton
|
||||
import androidx.compose.material3.TopAppBar
|
||||
import androidx.compose.runtime.Composable
|
||||
import androidx.compose.runtime.getValue
|
||||
import androidx.compose.ui.Alignment
|
||||
import androidx.compose.ui.Modifier
|
||||
import androidx.compose.ui.draw.alpha
|
||||
import androidx.compose.ui.draw.clip
|
||||
import androidx.compose.ui.graphics.Color
|
||||
import androidx.compose.ui.graphics.asImageBitmap
|
||||
import androidx.compose.ui.layout.ContentScale
|
||||
import androidx.compose.ui.text.font.FontWeight
|
||||
import androidx.compose.ui.unit.dp
|
||||
import android.graphics.BitmapFactory
|
||||
import androidx.compose.foundation.Image
|
||||
import androidx.compose.runtime.remember
|
||||
import androidx.hilt.navigation.compose.hiltViewModel
|
||||
import androidx.lifecycle.compose.collectAsStateWithLifecycle
|
||||
import com.omixlab.lckcontrol.cortex.CortexSession
|
||||
import java.io.File
|
||||
import java.text.SimpleDateFormat
|
||||
import java.util.Date
|
||||
import java.util.Locale
|
||||
|
||||
/**
 * Cortex tab: enable/disable background gameplay recording, choose the
 * rolling-buffer duration, monitor storage usage, and browse/delete recorded
 * sessions (with per-segment thumbnails).
 */
@OptIn(ExperimentalMaterial3Api::class)
@Composable
fun CortexScreen(
    viewModel: CortexViewModel = hiltViewModel(),
) {
    val sessions by viewModel.sessions.collectAsStateWithLifecycle()
    val isRecording by viewModel.isRecording.collectAsStateWithLifecycle()
    val storageUsedBytes by viewModel.storageUsedBytes.collectAsStateWithLifecycle()
    val isEnabled by viewModel.isEnabled.collectAsStateWithLifecycle()
    val maxMinutes by viewModel.maxMinutes.collectAsStateWithLifecycle()

    Scaffold(
        topBar = {
            TopAppBar(
                title = {
                    Row(verticalAlignment = Alignment.CenterVertically) {
                        Text("Cortex")
                        if (isRecording) {
                            Spacer(Modifier.width(8.dp))
                            // Pulsing red dot while a session is being recorded.
                            RecordingIndicator()
                        }
                    }
                },
                actions = {
                    // Bulk delete is only offered when there is something to delete.
                    if (sessions.isNotEmpty()) {
                        IconButton(onClick = { viewModel.deleteAllSessions() }) {
                            Icon(Icons.Default.DeleteSweep, contentDescription = "Delete all")
                        }
                    }
                },
            )
        },
    ) { innerPadding ->
        LazyColumn(
            modifier = Modifier
                .fillMaxSize()
                .padding(innerPadding)
                .padding(horizontal = 16.dp),
            verticalArrangement = Arrangement.spacedBy(12.dp),
        ) {
            // Enable/Disable toggle
            item {
                Card(modifier = Modifier.fillMaxWidth()) {
                    Row(
                        modifier = Modifier
                            .fillMaxWidth()
                            .padding(16.dp),
                        horizontalArrangement = Arrangement.SpaceBetween,
                        verticalAlignment = Alignment.CenterVertically,
                    ) {
                        Column {
                            Text("Background Recording", style = MaterialTheme.typography.titleMedium)
                            Text(
                                "Continuously record gameplay in the background",
                                style = MaterialTheme.typography.bodySmall,
                                color = MaterialTheme.colorScheme.onSurfaceVariant,
                            )
                        }
                        Switch(
                            checked = isEnabled,
                            onCheckedChange = { viewModel.setEnabled(it) },
                        )
                    }
                }
            }

            // Duration presets — only shown while recording is enabled.
            if (isEnabled) {
                item {
                    Column {
                        Text(
                            "Buffer Duration",
                            style = MaterialTheme.typography.titleSmall,
                            modifier = Modifier.padding(bottom = 8.dp),
                        )
                        Row(horizontalArrangement = Arrangement.spacedBy(8.dp)) {
                            listOf(5, 10, 15, 20).forEach { minutes ->
                                FilterChip(
                                    selected = maxMinutes == minutes,
                                    onClick = { viewModel.setMaxMinutes(minutes) },
                                    label = { Text("${minutes}m") },
                                )
                            }
                        }
                    }
                }
            }

            // Storage card
            item {
                // NOTE(review): queried on every recomposition of this item —
                // a cheap StatFs call, but could be hoisted if it shows up in traces.
                val availableBytes = viewModel.getAvailableStorageBytes()
                val usedMb = storageUsedBytes / (1024.0 * 1024.0)
                val availableGb = availableBytes / (1024.0 * 1024.0 * 1024.0)
                // Guard against division by zero when storage info is unavailable.
                val progress = if (availableBytes > 0) {
                    (storageUsedBytes.toFloat() / availableBytes).coerceIn(0f, 1f)
                } else 0f

                Card(modifier = Modifier.fillMaxWidth()) {
                    Column(modifier = Modifier.padding(16.dp)) {
                        Text("Storage", style = MaterialTheme.typography.titleSmall)
                        Spacer(Modifier.height(8.dp))
                        LinearProgressIndicator(
                            progress = { progress },
                            modifier = Modifier.fillMaxWidth(),
                        )
                        Spacer(Modifier.height(4.dp))
                        Text(
                            "Using %.1f MB of %.1f GB available".format(usedMb, availableGb),
                            style = MaterialTheme.typography.bodySmall,
                            color = MaterialTheme.colorScheme.onSurfaceVariant,
                        )
                    }
                }
            }

            // Session list (placeholder text when empty)
            if (sessions.isEmpty()) {
                item {
                    Text(
                        "No recordings yet",
                        style = MaterialTheme.typography.bodyMedium,
                        color = MaterialTheme.colorScheme.onSurfaceVariant,
                        modifier = Modifier.padding(vertical = 24.dp),
                    )
                }
            }

            // Keyed by sessionId so item state survives deletes/reorders.
            items(sessions, key = { it.sessionId }) { session ->
                SessionCard(
                    session = session,
                    onDelete = { viewModel.deleteSession(session.sessionId) },
                )
            }

            // Bottom spacer
            item { Spacer(Modifier.height(16.dp)) }
        }
    }
}
|
||||
|
||||
/** Small red dot pulsing between full and 20% opacity to signal active recording. */
@Composable
private fun RecordingIndicator() {
    val transition = rememberInfiniteTransition(label = "recording")
    val pulseAlpha by transition.animateFloat(
        initialValue = 1f,
        targetValue = 0.2f,
        animationSpec = infiniteRepeatable(
            animation = tween(800),
            repeatMode = RepeatMode.Reverse,
        ),
        label = "pulse",
    )
    Icon(
        imageVector = Icons.Default.Circle,
        contentDescription = "Recording",
        tint = Color.Red,
        modifier = Modifier.size(12.dp).alpha(pulseAlpha),
    )
}
|
||||
|
||||
/**
 * Card for one recorded Cortex session: a horizontal strip of segment
 * thumbnails (when present), the start time, segment count / total size,
 * and a delete action.
 *
 * @param session the session to display
 * @param onDelete invoked when the user taps the delete icon
 */
@Composable
private fun SessionCard(
    session: CortexSession,
    onDelete: () -> Unit,
) {
    // Fix: the formatter was rebuilt on every recomposition; remember it once
    // per composition instead (SimpleDateFormat construction is not free).
    val dateFormat = remember { SimpleDateFormat("MMM d, h:mm a", Locale.getDefault()) }
    val sizeMb = session.totalSizeBytes / (1024.0 * 1024.0)

    Card(
        modifier = Modifier.fillMaxWidth(),
        colors = CardDefaults.cardColors(
            containerColor = MaterialTheme.colorScheme.surfaceVariant,
        ),
    ) {
        Column(modifier = Modifier.padding(12.dp)) {
            // Thumbnail row
            if (session.thumbnailPaths.isNotEmpty()) {
                LazyRow(
                    horizontalArrangement = Arrangement.spacedBy(4.dp),
                    modifier = Modifier
                        .fillMaxWidth()
                        .height(80.dp),
                ) {
                    items(session.thumbnailPaths) { thumbPath ->
                        // Decoded once per path; cached across recompositions.
                        // NOTE(review): decodeFile runs on the composition
                        // thread — confirm thumbnails are small enough.
                        val bitmap = remember(thumbPath) {
                            BitmapFactory.decodeFile(thumbPath)
                        }
                        // decodeFile returns null for missing/corrupt files — skip those.
                        if (bitmap != null) {
                            Image(
                                bitmap = bitmap.asImageBitmap(),
                                contentDescription = null,
                                contentScale = ContentScale.Crop,
                                modifier = Modifier
                                    .size(width = 120.dp, height = 80.dp)
                                    .clip(RoundedCornerShape(4.dp)),
                            )
                        }
                    }
                }
                Spacer(Modifier.height(8.dp))
            }

            // Info row: start time + stats on the left, delete on the right.
            Row(
                modifier = Modifier.fillMaxWidth(),
                horizontalArrangement = Arrangement.SpaceBetween,
                verticalAlignment = Alignment.CenterVertically,
            ) {
                Column {
                    Text(
                        dateFormat.format(Date(session.startTime)),
                        style = MaterialTheme.typography.bodyMedium,
                        fontWeight = FontWeight.Medium,
                    )
                    Text(
                        "${session.segmentCount} segments, %.1f MB".format(sizeMb),
                        style = MaterialTheme.typography.bodySmall,
                        color = MaterialTheme.colorScheme.onSurfaceVariant,
                    )
                }
                IconButton(onClick = onDelete) {
                    Icon(
                        Icons.Default.Delete,
                        contentDescription = "Delete session",
                        tint = MaterialTheme.colorScheme.error,
                    )
                }
            }
        }
    }
}
|
||||
@@ -0,0 +1,67 @@
|
||||
package com.omixlab.lckcontrol.ui.cortex
|
||||
|
||||
import android.os.StatFs
|
||||
import androidx.lifecycle.ViewModel
|
||||
import androidx.lifecycle.viewModelScope
|
||||
import com.omixlab.lckcontrol.cortex.CortexManager
|
||||
import com.omixlab.lckcontrol.cortex.CortexPreferences
|
||||
import com.omixlab.lckcontrol.cortex.CortexSession
|
||||
import dagger.hilt.android.lifecycle.HiltViewModel
|
||||
import kotlinx.coroutines.flow.MutableStateFlow
|
||||
import kotlinx.coroutines.flow.StateFlow
|
||||
import kotlinx.coroutines.flow.asStateFlow
|
||||
import kotlinx.coroutines.launch
|
||||
import java.io.File
|
||||
import javax.inject.Inject
|
||||
|
||||
/**
 * ViewModel backing the Cortex tab.
 *
 * Exposes the manager's session list / recording state / storage usage
 * directly, and mirrors the enable flag and buffer-duration preference into
 * local StateFlows so the UI reflects writes to [CortexPreferences]
 * immediately.
 */
@HiltViewModel
class CortexViewModel @Inject constructor(
    private val cortexManager: CortexManager,
    private val cortexPreferences: CortexPreferences,
) : ViewModel() {

    val sessions: StateFlow<List<CortexSession>> = cortexManager.sessions
    val isRecording: StateFlow<Boolean> = cortexManager.isRecording
    val storageUsedBytes: StateFlow<Long> = cortexManager.storageUsedBytes

    // Preference-backed toggles, cached locally so writes show up without a reload.
    private val _isEnabled = MutableStateFlow(cortexPreferences.isEnabled())
    val isEnabled: StateFlow<Boolean> = _isEnabled.asStateFlow()

    private val _maxMinutes = MutableStateFlow(cortexPreferences.getMaxMinutes())
    val maxMinutes: StateFlow<Int> = _maxMinutes.asStateFlow()

    init {
        // Re-scan disk so the session list is fresh when the tab opens.
        cortexManager.refreshSessions()
    }

    /** Persist the background-recording toggle and update UI state. */
    fun setEnabled(enabled: Boolean) {
        cortexPreferences.setEnabled(enabled)
        _isEnabled.value = enabled
    }

    /** Persist the rolling-buffer duration (minutes) and update UI state. */
    fun setMaxMinutes(minutes: Int) {
        cortexPreferences.setMaxMinutes(minutes)
        _maxMinutes.value = minutes
    }

    /** Delete one recorded session (and its files on disk) by id. */
    fun deleteSession(sessionId: String) {
        viewModelScope.launch {
            cortexManager.deleteSession(sessionId)
        }
    }

    /** Delete every recorded session. */
    fun deleteAllSessions() {
        viewModelScope.launch {
            cortexManager.deleteAllSessions()
        }
    }

    /**
     * Free bytes on the data partition, or 0 when it cannot be read.
     *
     * NOTE(review): hardcodes /data instead of a Context-derived path
     * (e.g. filesDir) — works on stock Android, but worth confirming on
     * devices with nonstandard partitioning.
     */
    fun getAvailableStorageBytes(): Long {
        return try {
            // availableBytes == availableBlocksLong * blockSizeLong, done by the platform.
            StatFs(File("/data").absolutePath).availableBytes
        } catch (_: Exception) {
            0L
        }
    }
}
|
||||
@@ -4,6 +4,7 @@ import androidx.compose.foundation.layout.padding
|
||||
import androidx.compose.material.icons.Icons
|
||||
import androidx.compose.material.icons.filled.Dashboard
|
||||
import androidx.compose.material.icons.filled.Devices
|
||||
import androidx.compose.material.icons.filled.FiberSmartRecord
|
||||
import androidx.compose.material.icons.filled.Person
|
||||
import androidx.compose.material3.Icon
|
||||
import androidx.compose.material3.NavigationBar
|
||||
@@ -26,6 +27,7 @@ import com.omixlab.lckcontrol.data.local.TokenStore
|
||||
import com.omixlab.lckcontrol.data.remote.LckApiService
|
||||
import com.omixlab.lckcontrol.ui.accounts.AccountsScreen
|
||||
import com.omixlab.lckcontrol.ui.clients.ActiveClientsScreen
|
||||
import com.omixlab.lckcontrol.ui.cortex.CortexScreen
|
||||
import com.omixlab.lckcontrol.ui.dashboard.DashboardScreen
|
||||
import com.omixlab.lckcontrol.ui.login.LoginScreen
|
||||
import com.omixlab.lckcontrol.ui.chat.ChatScreen
|
||||
@@ -41,6 +43,7 @@ private data class BottomNavItem(
|
||||
// Tabs in display order for the bottom navigation bar; Cortex sits between
// Accounts and Clients.
private val bottomNavItems = listOf(
    BottomNavItem(Screen.Dashboard, "Dashboard", Icons.Default.Dashboard),
    BottomNavItem(Screen.Accounts, "Accounts", Icons.Default.Person),
    BottomNavItem(Screen.Cortex, "Cortex", Icons.Default.FiberSmartRecord),
    BottomNavItem(Screen.ActiveClients, "Clients", Icons.Default.Devices),
)
|
||||
|
||||
@@ -163,6 +166,9 @@ fun AppNavigation(tokenStore: TokenStore, apiService: LckApiService) {
|
||||
) {
|
||||
ChatScreen(onBack = { navController.popBackStack() })
|
||||
}
|
||||
composable(Screen.Cortex.route) {
|
||||
CortexScreen()
|
||||
}
|
||||
composable(Screen.ActiveClients.route) {
|
||||
ActiveClientsScreen(
|
||||
onNavigateToPlan = { planId ->
|
||||
|
||||
@@ -11,6 +11,7 @@ sealed class Screen(val route: String) {
|
||||
data object PlanDetail : Screen("plan_detail/{planId}") {
|
||||
fun createRoute(planId: String) = "plan_detail/$planId"
|
||||
}
|
||||
data object Cortex : Screen("cortex")
|
||||
data object ActiveClients : Screen("active_clients")
|
||||
data object Chat : Screen("chat/{planId}/{service}/{destinationId}") {
|
||||
fun createRoute(planId: String, service: String, destinationId: String) =
|
||||
|
||||
Reference in New Issue
Block a user