image capture

This commit is contained in:
SaiD 2025-12-12 23:32:13 +05:30
parent f647a509d5
commit a392807855
18 changed files with 1284 additions and 380 deletions

View File

@ -0,0 +1,383 @@
package com.example.livingai.data.camera
import android.content.Context
import android.graphics.Bitmap
import android.graphics.RectF
import android.util.Log
import com.example.livingai.R
import com.example.livingai.domain.camera.*
import com.example.livingai.domain.model.camera.*
import com.example.livingai.utils.SilhouetteManager
import org.tensorflow.lite.Interpreter
import org.tensorflow.lite.support.common.FileUtil
import java.io.IOException
import java.nio.ByteBuffer
import java.nio.ByteOrder
import kotlin.math.abs
import com.google.mlkit.vision.common.InputImage
import com.google.mlkit.vision.segmentation.subject.SubjectSegmentation
import com.google.mlkit.vision.segmentation.subject.SubjectSegmenterOptions
import kotlinx.coroutines.suspendCancellableCoroutine
import kotlin.coroutines.resume
class DefaultOrientationChecker : OrientationChecker {
override suspend fun analyze(input: PipelineInput): Instruction {
val orientationLower = input.orientation.lowercase()
val isPortraitRequired = orientationLower == "front" || orientationLower == "back"
// Corrected Logic:
// 90 or 270 degrees means the device is held in PORTRAIT
val isDevicePortrait = input.deviceOrientation == 90 || input.deviceOrientation == 270
// 0 or 180 degrees means the device is held in LANDSCAPE
val isDeviceLandscape = input.deviceOrientation == 0 || input.deviceOrientation == 180
var isValid = true
var message = "Orientation Correct"
if (isPortraitRequired && !isDevicePortrait) {
isValid = false
message = "Turn to portrait mode"
} else if (!isPortraitRequired && !isDeviceLandscape) {
isValid = false
message = "Turn to landscape mode"
}
val animRes = if (!isValid) R.drawable.ic_launcher_foreground else null
return Instruction(
message = message,
animationResId = animRes,
isValid = isValid,
result = OrientationResult(input.deviceOrientation, if (isPortraitRequired) CameraOrientation.PORTRAIT else CameraOrientation.LANDSCAPE)
)
}
}
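// Illustrative usage sketch (hypothetical values, not part of the pipeline wiring): a "front" shot
// requires portrait, and a reported rotation of 90 or 270 degrees counts as portrait, so this
// input passes the check.
suspend fun orientationCheckerExample(): Boolean {
    val input = PipelineInput(
        image = null,
        deviceOrientation = 90,
        deviceRoll = 0f,
        devicePitch = -90f,
        deviceAzimuth = 0f,
        requiredOrientation = CameraOrientation.PORTRAIT,
        targetAnimal = "Cow",
        orientation = "front"
    )
    return DefaultOrientationChecker().analyze(input).isValid // true
}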
class DefaultTiltChecker : TiltChecker {
override suspend fun analyze(input: PipelineInput): Instruction {
Log.d("TiltChecker", "Required Orientation: ${input.requiredOrientation}, Pitch: ${input.devicePitch}, Roll: ${input.deviceRoll}, Azimuth: ${input.deviceAzimuth}")
val tolerance = 10.0f
val isLevel: Boolean
if (input.requiredOrientation == CameraOrientation.PORTRAIT) {
// Ideal for portrait: pitch around -90, roll around 0
val idealPitch = -90.0f
isLevel = abs(input.devicePitch - idealPitch) <= tolerance
} else { // LANDSCAPE
// Ideal for landscape: pitch around 0, roll around +/-90
val idealPitch = 0.0f
isLevel = abs(input.devicePitch - idealPitch) <= tolerance
}
val message = if (isLevel) "Device is level" else "Keep the phone straight"
return Instruction(
message = message,
isValid = isLevel,
result = TiltResult(input.deviceRoll, input.devicePitch, isLevel)
)
}
}
class TFLiteObjectDetector(context: Context) : ObjectDetector {
private var interpreter: Interpreter? = null
private var labels: List<String> = emptyList()
private var modelInputWidth: Int = 0
private var modelInputHeight: Int = 0
init {
try {
val modelBuffer = FileUtil.loadMappedFile(context, "efficientdet-lite0.tflite")
interpreter = Interpreter(modelBuffer)
labels = FileUtil.loadLabels(context, "labels.txt")
val inputTensor = interpreter?.getInputTensor(0)
val inputShape = inputTensor?.shape()
if (inputShape != null && inputShape.size >= 3) {
modelInputWidth = inputShape[1]
modelInputHeight = inputShape[2]
} else {
Log.e("TFLiteObjectDetector", "Invalid input tensor shape.")
}
Log.d("TFLiteObjectDetector", "TFLite model loaded successfully.")
} catch (e: IOException) {
Log.e("TFLiteObjectDetector", "Error loading TFLite model or labels from assets.", e)
Log.e("TFLiteObjectDetector", "Please ensure 'efficientdet-lite0.tflite' and 'labelmap.txt' are in the 'app/src/main/assets' directory.")
interpreter = null
}
}
override suspend fun analyze(input: PipelineInput): Instruction {
if (interpreter == null) {
return Instruction("Object detector not initialized. Check asset files.", isValid = false)
}
val image = input.image ?: return Instruction("Waiting for camera...", isValid = false)
if (modelInputWidth == 0 || modelInputHeight == 0) {
return Instruction("Object detector input shape unavailable.", isValid = false)
}
val resizedBitmap = Bitmap.createScaledBitmap(image, modelInputWidth, modelInputHeight, true)
val byteBuffer = convertBitmapToByteBuffer(resizedBitmap)
// Define model outputs with the correct size
val maxDetections = 25
val outputLocations = Array(1) { Array(maxDetections) { FloatArray(4) } }
val outputClasses = Array(1) { FloatArray(maxDetections) }
val outputScores = Array(1) { FloatArray(maxDetections) }
val numDetections = FloatArray(1)
val outputs: MutableMap<Int, Any> = HashMap()
outputs[0] = outputLocations
outputs[1] = outputClasses
outputs[2] = outputScores
outputs[3] = numDetections
interpreter?.runForMultipleInputsOutputs(arrayOf(byteBuffer), outputs)
val detectedObjects = mutableListOf<Detection>()
val detectionCount = numDetections[0].toInt()
for (i in 0 until detectionCount) {
val score = outputScores[0][i]
if (score > 0.5f) { // Confidence threshold
val classIndex = outputClasses[0][i].toInt()
val label = labels.getOrElse(classIndex) { "Unknown" }
val location = outputLocations[0][i]
// TF Lite model returns ymin, xmin, ymax, xmax in normalized coordinates
val ymin = location[0] * image.height
val xmin = location[1] * image.width
val ymax = location[2] * image.height
val xmax = location[3] * image.width
val boundingBox = RectF(xmin, ymin, xmax, ymax)
detectedObjects.add(Detection(label, score, boundingBox))
}
}
val targetAnimalDetected = detectedObjects.find { it.label.equals(input.targetAnimal, ignoreCase = true) }
val isValid = targetAnimalDetected != null
val message = if (isValid) {
"${input.targetAnimal} Detected"
} else {
if (detectedObjects.isEmpty()) "No objects detected" else "Animal not detected. Move closer or point the camera at the animal."
}
val refObjects = detectedObjects
.filter { it !== targetAnimalDetected }
.mapIndexed { index, detection ->
ReferenceObject(
id = "ref_$index",
label = detection.label,
bounds = detection.bounds,
relativeHeight = detection.bounds.height() / image.height,
relativeWidth = detection.bounds.width() / image.width,
distance = 1.0f // Placeholder
)
}
return Instruction(
message = message,
isValid = isValid,
result = DetectionResult(
isAnimalDetected = isValid,
animalBounds = targetAnimalDetected?.bounds,
referenceObjects = refObjects,
label = targetAnimalDetected?.label,
confidence = targetAnimalDetected?.confidence ?: 0f
)
)
}
private fun convertBitmapToByteBuffer(bitmap: Bitmap): ByteBuffer {
val byteBuffer = ByteBuffer.allocateDirect(1 * modelInputWidth * modelInputHeight * 3)
byteBuffer.order(ByteOrder.nativeOrder())
val intValues = IntArray(modelInputWidth * modelInputHeight)
bitmap.getPixels(intValues, 0, bitmap.width, 0, 0, bitmap.width, bitmap.height)
var pixel = 0
for (i in 0 until modelInputWidth) {
for (j in 0 until modelInputHeight) {
val `val` = intValues[pixel++]
// Assuming model expects UINT8 [0, 255]
byteBuffer.put(((`val` shr 16) and 0xFF).toByte())
byteBuffer.put(((`val` shr 8) and 0xFF).toByte())
byteBuffer.put((`val` and 0xFF).toByte())
}
}
return byteBuffer
}
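    // Hedged variant (not used by this detector): some TFLite detection models expect normalized
    // FLOAT32 input rather than UINT8. A float conversion would allocate 4 bytes per channel and
    // scale each value into [0, 1], as sketched here.
    private fun convertBitmapToFloatBuffer(bitmap: Bitmap): ByteBuffer {
        val byteBuffer = ByteBuffer.allocateDirect(4 * modelInputWidth * modelInputHeight * 3)
        byteBuffer.order(ByteOrder.nativeOrder())
        val intValues = IntArray(modelInputWidth * modelInputHeight)
        bitmap.getPixels(intValues, 0, bitmap.width, 0, 0, bitmap.width, bitmap.height)
        for (pixel in intValues) {
            byteBuffer.putFloat(((pixel shr 16) and 0xFF) / 255f) // R
            byteBuffer.putFloat(((pixel shr 8) and 0xFF) / 255f)  // G
            byteBuffer.putFloat((pixel and 0xFF) / 255f)          // B
        }
        return byteBuffer
    }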
data class Detection(val label: String, val confidence: Float, val bounds: RectF)
}
class MockPoseAnalyzer : PoseAnalyzer {
private val segmenter by lazy {
val options = SubjectSegmenterOptions.Builder()
.enableForegroundConfidenceMask()
.build()
SubjectSegmentation.getClient(options)
}
override suspend fun analyze(input: PipelineInput): Instruction {
val detectionResult = input.previousDetectionResult ?: return Instruction("No detection result", isValid = false)
if (!detectionResult.isAnimalDetected || detectionResult.animalBounds == null) {
return Instruction("Animal not detected", isValid = false)
}
val image = input.image ?: return Instruction("No image", isValid = false)
val animalMask = getAnimalSegmentation(image) // BooleanArray
// --- Get weighted silhouette mask ---
var weightedSilhouette = SilhouetteManager.getWeightedMask(input.orientation)
?: return Instruction("Silhouette not found for ${input.orientation}", isValid = false)
// Ensure silhouette mask matches camera frame size
if (weightedSilhouette.width != image.width || weightedSilhouette.height != image.height) {
weightedSilhouette =
Bitmap.createScaledBitmap(weightedSilhouette, image.width, image.height, true)
}
val weightMap = convertWeightedBitmapToFloatArray(weightedSilhouette)
// --- Compute weighted Jaccard ---
val jaccard = calculateWeightedJaccard(animalMask, weightMap)
Log.d("MockPoseAnalyzer", "Weighted Jaccard Similarity = $jaccard")
val isValid = jaccard >= 0.40f
val msg = if (isValid) "Pose Correct" else "Pose Incorrect (Jaccard: %.2f)".format(jaccard)
return Instruction(message = msg, isValid = isValid, result = detectionResult)
}
// ----------------------------------------------------------------------
// REAL segmentation using ML Kit
// ----------------------------------------------------------------------
private suspend fun getAnimalSegmentation(bitmap: Bitmap): BooleanArray = suspendCancellableCoroutine { continuation ->
val inputImage = InputImage.fromBitmap(bitmap, 0)
segmenter.process(inputImage)
.addOnSuccessListener { result ->
val mask = result.foregroundConfidenceMask
if (mask != null) {
val floatArray = FloatArray(mask.capacity())
mask.get(floatArray)
val booleanArray = BooleanArray(floatArray.size) { i ->
floatArray[i] > 0.5f
}
continuation.resume(booleanArray)
} else {
Log.e("MockPoseAnalyzer", "Segmentation result null")
continuation.resume(BooleanArray(bitmap.width * bitmap.height) { false })
}
}
.addOnFailureListener { e ->
Log.e("MockPoseAnalyzer", "Segmentation failed", e)
continuation.resume(BooleanArray(bitmap.width * bitmap.height) { false })
}
}
// ----------------------------------------------------------------------
// Convert weighted mask bitmap → float[] values in range -1..1
// ----------------------------------------------------------------------
private fun convertWeightedBitmapToFloatArray(bitmap: Bitmap): FloatArray {
val w = bitmap.width
val h = bitmap.height
val pixels = IntArray(w * h)
bitmap.getPixels(pixels, 0, w, 0, 0, w, h)
val out = FloatArray(w * h)
for (i in pixels.indices) {
val color = pixels[i] and 0xFF
val norm = (color / 255f) * 2f - 1f // Converts 0..255 → -1..1
out[i] = norm
}
return out
}
// ----------------------------------------------------------------------
// Weighted Jaccard Similarity
// mask = predicted (BooleanArray)
// weightMap = ground truth silhouette weights (-1..1)
// ----------------------------------------------------------------------
private fun calculateWeightedJaccard(predMask: BooleanArray, weight: FloatArray): Float {
if (predMask.size != weight.size) return 0f
var weightedIntersection = 0f
var weightedUnion = 0f
for (i in predMask.indices) {
val w = weight[i] // -1.0 .. 1.0
val pred = predMask[i] // true/false
val silhouetteInside = w > 0f
val intersection = pred && silhouetteInside
val union = pred || silhouetteInside
if (intersection) weightedIntersection += w.coerceAtLeast(0f)
if (union) {
// Penalize far outside with negative weight also
weightedUnion += if (silhouetteInside) {
w.coerceAtLeast(0f)
} else {
(-w) // penalty
}
}
}
if (weightedUnion == 0f) return 0f
return weightedIntersection / weightedUnion
}
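// Worked toy example (illustrative numbers): with weights [0.8, 0.5, -0.3, -1.0] and a predicted
// mask [true, false, true, false]:
//   pixel 0: predicted inside the silhouette  -> intersection += 0.8, union += 0.8
//   pixel 1: missed silhouette pixel          -> union += 0.5
//   pixel 2: predicted outside the silhouette -> union += 0.3 (the -w penalty)
//   pixel 3: neither predicted nor silhouette -> no contribution
// giving 0.8 / 1.6 = 0.5, which passes the 0.40 threshold used in analyze().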
}
class DefaultCaptureHandler : CaptureHandler {
override suspend fun capture(input: PipelineInput, detectionResult: DetectionResult): CaptureData {
val image = input.image ?: throw IllegalStateException("Image cannot be null during capture")
val segmentationMask = BooleanArray(100) { true }
val animalMetrics = ObjectMetrics(
relativeHeight = 0.5f,
relativeWidth = 0.3f,
distance = 1.2f
)
return CaptureData(
image = image,
segmentationMask = segmentationMask,
animalMetrics = animalMetrics,
referenceObjects = detectionResult.referenceObjects
)
}
}
class DefaultMeasurementCalculator : MeasurementCalculator {
override fun calculateRealMetrics(
targetHeight: Float,
referenceObject: ReferenceObject,
currentMetrics: ObjectMetrics
): RealWorldMetrics {
if (referenceObject.relativeHeight == 0f) return RealWorldMetrics(0f, 0f, 0f)
val scale = targetHeight / referenceObject.relativeHeight
val realHeight = currentMetrics.relativeHeight * scale
val realWidth = currentMetrics.relativeWidth * scale
val realDistance = currentMetrics.distance
return RealWorldMetrics(
height = realHeight,
width = realWidth,
distance = realDistance
)
}
}
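// Worked example (hypothetical numbers): if a reference object known to be 120 cm tall fills 0.4 of
// the frame height, the scale factor is 120 / 0.4 = 300, so an animal filling 0.5 of the frame
// height is estimated at 150 cm, and 0.8 of the frame width at 240 cm.
fun measurementExample(): RealWorldMetrics {
    val referencePost = ReferenceObject(
        id = "ref_0",
        label = "fence post",
        bounds = RectF(0f, 0f, 50f, 200f),
        relativeHeight = 0.4f,
        relativeWidth = 0.1f,
        distance = 1.0f
    )
    val animalMetrics = ObjectMetrics(relativeHeight = 0.5f, relativeWidth = 0.8f, distance = 1.2f)
    // height = 150 cm, width = 240 cm, distance is passed through unchanged
    return DefaultMeasurementCalculator().calculateRealMetrics(120f, referencePost, animalMetrics)
}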

View File

@ -46,10 +46,12 @@ import com.example.livingai.pages.addprofile.AddProfileViewModel
import com.example.livingai.pages.camera.CameraViewModel
import com.example.livingai.pages.camera.VideoViewModel
import com.example.livingai.pages.home.HomeViewModel
import com.example.livingai.pages.imagepreview.ImagePreviewViewModel
import com.example.livingai.pages.listings.ListingsViewModel
import com.example.livingai.pages.onboarding.OnBoardingViewModel
import com.example.livingai.pages.ratings.RatingViewModel
import com.example.livingai.pages.settings.SettingsViewModel
import com.example.livingai.pages.videopreview.VideoPreviewViewModel
import com.example.livingai.utils.Constants
import com.example.livingai.utils.CoroutineDispatchers
import com.example.livingai.utils.DefaultCoroutineDispatchers
@ -57,12 +59,14 @@ import com.example.livingai.utils.ScreenDimensions
import com.example.livingai.utils.SilhouetteManager
import com.example.livingai.utils.TiltSensorManager
import org.koin.android.ext.koin.androidContext
-import org.koin.core.module.dsl.viewModel
+import org.koin.androidx.viewmodel.dsl.viewModel
import org.koin.dsl.module
private val Context.dataStore: DataStore<Preferences> by preferencesDataStore(name = Constants.USER_SETTINGS)
val appModule = module {
includes(cameraModule)
single<DataStore<Preferences>> { androidContext().dataStore }
single<AppDataRepository> { AppDataRepositoryImpl(get()) }
@ -163,6 +167,8 @@ val appModule = module {
viewModel { ListingsViewModel(get()) }
viewModel { SettingsViewModel(get()) }
viewModel { RatingViewModel(get(), get(), get(), get()) }
-viewModel { CameraViewModel(get(), get(), get(), get()) }
+viewModel { CameraViewModel(get(), get(), get(), get(), get(), get(), get()) }
viewModel { VideoViewModel(get(), get(), get()) }
viewModel { ImagePreviewViewModel() }
viewModel { VideoPreviewViewModel() }
}

View File

@ -0,0 +1,18 @@
package com.example.livingai.di
import com.example.livingai.data.camera.*
import com.example.livingai.domain.camera.*
import org.koin.android.ext.koin.androidContext
import org.koin.dsl.module
val cameraModule = module {
// Pipeline Steps
factory<OrientationChecker> { DefaultOrientationChecker() }
factory<TiltChecker> { DefaultTiltChecker() }
factory<ObjectDetector> { TFLiteObjectDetector(androidContext()) }
factory<PoseAnalyzer> { MockPoseAnalyzer() }
// Handlers
factory<CaptureHandler> { DefaultCaptureHandler() }
factory<MeasurementCalculator> { DefaultMeasurementCalculator() }
}

View File

@ -0,0 +1,59 @@
package com.example.livingai.domain.camera
import android.graphics.Bitmap
import com.example.livingai.domain.model.camera.CameraOrientation
import com.example.livingai.domain.model.camera.CaptureData
import com.example.livingai.domain.model.camera.DetectionResult
import com.example.livingai.domain.model.camera.Instruction
import com.example.livingai.domain.model.camera.ObjectMetrics
import com.example.livingai.domain.model.camera.ReferenceObject
interface CameraPipelineStep {
/**
* Analyzes the current frame (or sensor data) and returns an instruction.
*/
suspend fun analyze(input: PipelineInput): Instruction
}
data class PipelineInput(
val image: Bitmap?,
val deviceOrientation: Int, // degrees
val deviceRoll: Float,
val devicePitch: Float,
val deviceAzimuth: Float,
val requiredOrientation: CameraOrientation,
val targetAnimal: String, // e.g., "Dog", "Cat"
val orientation: String, // "front", "back", "side", etc.
val previousDetectionResult: DetectionResult? = null // To pass detection result to subsequent steps
)
interface OrientationChecker : CameraPipelineStep
interface TiltChecker : CameraPipelineStep
interface ObjectDetector : CameraPipelineStep
interface PoseAnalyzer : CameraPipelineStep
interface CaptureHandler {
suspend fun capture(input: PipelineInput, detectionResult: DetectionResult): CaptureData
}
interface MeasurementCalculator {
/**
* Calculates the real world dimensions of the animal based on a known reference object dimension.
* @param targetHeight The real height of the reference object provided by the user.
* @param referenceObject The reference object selected by the user.
* @param currentMetrics The current relative metrics of the animal.
* @return The calculated real-world metrics for the animal.
*/
fun calculateRealMetrics(
targetHeight: Float,
referenceObject: ReferenceObject,
currentMetrics: ObjectMetrics
): RealWorldMetrics
}
data class RealWorldMetrics(
val height: Float,
val width: Float,
val distance: Float,
val unit: String = "cm"
)

View File

@ -0,0 +1,79 @@
package com.example.livingai.domain.model.camera
import android.graphics.Bitmap
import android.graphics.RectF
/**
* Represents the output of a pipeline analysis step.
* @param message Instruction text to be displayed to the user.
* @param animationResId Resource ID for a visual GIF/Animation explaining the instruction.
* @param isValid True if the step passed validation, False otherwise.
* @param result The detailed analysis result (optional).
*/
data class Instruction(
val message: String,
val animationResId: Int? = null,
val isValid: Boolean,
val result: AnalysisResult? = null
)
/**
* Sealed interface for different types of analysis results.
*/
sealed interface AnalysisResult
data class OrientationResult(
val currentOrientation: Int,
val requiredOrientation: CameraOrientation
) : AnalysisResult
data class TiltResult(
val roll: Float,
val pitch: Float,
val isLevel: Boolean
) : AnalysisResult
data class DetectionResult(
val isAnimalDetected: Boolean,
val animalBounds: RectF?,
val referenceObjects: List<ReferenceObject>,
val label: String? = null,
val confidence: Float = 0f
) : AnalysisResult
data class PoseResult(
val isCorrectPose: Boolean,
val feedback: String
) : AnalysisResult
/**
* Data class representing a reference object detected in the scene.
*/
data class ReferenceObject(
val id: String,
val label: String,
val bounds: RectF,
val relativeHeight: Float,
val relativeWidth: Float,
val distance: Float
)
enum class CameraOrientation {
PORTRAIT, LANDSCAPE
}
/**
* Data to be saved after a successful capture.
*/
data class CaptureData(
val image: Bitmap,
val segmentationMask: BooleanArray, // Flattened 2D array or similar representation
val animalMetrics: ObjectMetrics,
val referenceObjects: List<ReferenceObject>
)
data class ObjectMetrics(
val relativeHeight: Float,
val relativeWidth: Float,
val distance: Float
)

View File

@ -0,0 +1,346 @@
package com.example.livingai.pages.camera
import android.graphics.Bitmap
import android.graphics.Matrix
import android.graphics.RectF
import androidx.camera.core.ImageProxy
import androidx.camera.view.LifecycleCameraController
import androidx.compose.foundation.Canvas
import androidx.compose.foundation.Image
import androidx.compose.foundation.background
import androidx.compose.foundation.layout.*
import androidx.compose.foundation.shape.RoundedCornerShape
import androidx.compose.material.icons.Icons
import androidx.compose.material.icons.filled.Camera
import androidx.compose.material.icons.filled.Refresh
import androidx.compose.material3.*
import androidx.compose.runtime.*
import androidx.compose.ui.Alignment
import androidx.compose.ui.Modifier
import androidx.compose.ui.draw.alpha
import androidx.compose.ui.geometry.Offset
import androidx.compose.ui.geometry.Size
import androidx.compose.ui.graphics.Color
import androidx.compose.ui.graphics.asImageBitmap
import androidx.compose.ui.graphics.drawscope.Stroke
import androidx.compose.ui.layout.ContentScale
import androidx.compose.ui.platform.LocalContext
import androidx.compose.ui.res.painterResource
import androidx.compose.ui.text.style.TextAlign
import androidx.compose.ui.unit.dp
import androidx.core.content.ContextCompat
import androidx.navigation.NavController
import com.example.livingai.domain.camera.RealWorldMetrics
import com.example.livingai.domain.model.camera.*
import com.example.livingai.pages.components.CameraPreview
import com.example.livingai.pages.components.PermissionWrapper
import com.example.livingai.utils.SilhouetteManager
import org.koin.androidx.compose.koinViewModel
import kotlin.math.max
@Composable
fun CameraCaptureScreen(
navController: NavController,
orientation: String?,
viewModel: CameraViewModel = koinViewModel()
) {
val uiState by viewModel.uiState.collectAsState()
LaunchedEffect(orientation) {
if (orientation != null) {
viewModel.setRequiredOrientation(orientation)
}
}
PermissionWrapper {
if (uiState.isPreviewMode && uiState.captureData != null) {
CapturePreviewScreen(
captureData = uiState.captureData!!,
realWorldMetrics = uiState.realWorldMetrics,
onRetake = { viewModel.resetCamera() },
onSelectReference = { ref, height ->
viewModel.onReferenceObjectSelected(ref, height)
}
)
} else {
ActiveCameraScreen(
uiState = uiState,
viewModel = viewModel,
)
}
}
}
@Composable
fun ActiveCameraScreen(
uiState: CameraUiState,
viewModel: CameraViewModel,
) {
val context = LocalContext.current
var analysisImageSize by remember { mutableStateOf(Size(0f, 0f)) }
val controller = remember {
LifecycleCameraController(context).apply {
setEnabledUseCases(LifecycleCameraController.IMAGE_ANALYSIS or LifecycleCameraController.IMAGE_CAPTURE)
setImageAnalysisAnalyzer(
ContextCompat.getMainExecutor(context),
{ imageProxy ->
val bitmap = imageProxy.toBitmap()
val rotation = imageProxy.imageInfo.rotationDegrees
val rotatedBitmap = rotateBitmap(bitmap, rotation)
analysisImageSize = Size(rotatedBitmap.width.toFloat(), rotatedBitmap.height.toFloat())
viewModel.processFrame(
image = rotatedBitmap,
deviceOrientation = rotation
)
imageProxy.close()
}
)
}
}
fun captureImage() {
val executor = ContextCompat.getMainExecutor(context)
controller.takePicture(
executor,
object : androidx.camera.core.ImageCapture.OnImageCapturedCallback() {
override fun onCaptureSuccess(image: ImageProxy) {
val bitmap = image.toBitmap()
val rotation = image.imageInfo.rotationDegrees
val rotatedBitmap = rotateBitmap(bitmap, rotation)
viewModel.onCaptureClicked(
image = rotatedBitmap,
deviceOrientation = rotation
)
image.close()
}
override fun onError(exception: androidx.camera.core.ImageCaptureException) {
// Handle error
}
}
)
}
Scaffold(
floatingActionButton = {
if (uiState.isReadyToCapture) {
FloatingActionButton(onClick = { captureImage() }) {
Icon(Icons.Default.Camera, contentDescription = "Capture")
}
}
},
floatingActionButtonPosition = FabPosition.Center
) { paddingValues ->
Box(modifier = Modifier.fillMaxSize().padding(paddingValues)) {
CameraPreview(
modifier = Modifier.fillMaxSize(),
controller = controller
)
// Silhouette Overlay
if (uiState.targetOrientation.isNotEmpty()) {
val silhouette = SilhouetteManager.getOriginal(uiState.targetOrientation)
if (silhouette != null) {
Image(
bitmap = silhouette.asImageBitmap(),
contentDescription = "Silhouette",
modifier = Modifier.fillMaxSize().alpha(0.3f),
contentScale = ContentScale.Fit
)
}
}
// Overlays
uiState.currentInstruction?.let { instruction ->
InstructionOverlay(
instruction = instruction,
modifier = Modifier.align(Alignment.Center)
)
}
// Bounding Boxes Overlay for active detection
uiState.lastDetectionResult?.let { detection ->
DetectionOverlay(
detection = detection,
imageWidth = analysisImageSize.width.toInt(),
imageHeight = analysisImageSize.height.toInt(),
modifier = Modifier.fillMaxSize()
)
}
}
}
}
@Composable
fun InstructionOverlay(
instruction: Instruction,
modifier: Modifier = Modifier
) {
if (instruction.isValid && instruction.message == "Ready to capture") return
Column(
modifier = modifier
.fillMaxWidth()
.padding(32.dp)
.background(Color.Black.copy(alpha = 0.6f), RoundedCornerShape(16.dp))
.padding(16.dp),
horizontalAlignment = Alignment.CenterHorizontally
) {
if (!instruction.isValid) {
Text(
text = instruction.message,
style = MaterialTheme.typography.titleLarge,
color = Color.White,
textAlign = TextAlign.Center
)
// Visual GIF logic would go here using instruction.animationResId
}
}
}
@Composable
fun DetectionOverlay(
detection: DetectionResult,
imageWidth: Int,
imageHeight: Int,
modifier: Modifier = Modifier
) {
if (imageWidth == 0 || imageHeight == 0) return
Canvas(modifier = modifier) {
val canvasWidth = size.width
val canvasHeight = size.height
// This calculation assumes the camera preview's scale type is `FILL_CENTER`.
// It maintains the aspect ratio of the image and centers it.
val widthRatio = canvasWidth / imageWidth
val heightRatio = canvasHeight / imageHeight
val scale = max(widthRatio, heightRatio)
val offsetX = (canvasWidth - imageWidth * scale) / 2f
val offsetY = (canvasHeight - imageHeight * scale) / 2f
// Helper to transform coordinates
val transform: (RectF) -> RectF = { box ->
RectF(
box.left * scale + offsetX,
box.top * scale + offsetY,
box.right * scale + offsetX,
box.bottom * scale + offsetY
)
}
// Draw animal box (Yellow)
detection.animalBounds?.let {
val transformedBox = transform(it)
drawRect(
color = Color.Yellow,
topLeft = Offset(transformedBox.left, transformedBox.top),
size = Size(transformedBox.width(), transformedBox.height()),
style = Stroke(width = 2.dp.toPx())
)
}
// Draw reference object boxes (Cyan)
detection.referenceObjects.forEach { refObject ->
val transformedBox = transform(refObject.bounds)
drawRect(
color = Color.Cyan,
topLeft = Offset(transformedBox.left, transformedBox.top),
size = Size(transformedBox.width(), transformedBox.height()),
style = Stroke(width = 2.dp.toPx())
)
}
}
}
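// Worked example (illustrative numbers): a 720x1280 analysis frame drawn on a 1080x1920 canvas gives
// widthRatio = heightRatio = 1.5, so scale = 1.5 and both offsets are 0. On a taller 1080x2340 canvas,
// scale = 2340 / 1280 ≈ 1.83 and offsetX ≈ (1080 - 720 * 1.83) / 2 ≈ -118, i.e. the frame is cropped
// horizontally, matching the FILL_CENTER behavior assumed above.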
@Composable
fun CapturePreviewScreen(
captureData: CaptureData,
realWorldMetrics: RealWorldMetrics?,
onRetake: () -> Unit,
onSelectReference: (ReferenceObject, Float) -> Unit
) {
var showDialog by remember { mutableStateOf(false) }
var selectedRefObject by remember { mutableStateOf<ReferenceObject?>(null) }
var inputHeight by remember { mutableStateOf("") }
if (showDialog && selectedRefObject != null) {
AlertDialog(
onDismissRequest = { showDialog = false },
title = { Text("Enter Real Height") },
text = {
OutlinedTextField(
value = inputHeight,
onValueChange = { inputHeight = it },
label = { Text("Height (cm)") }
)
},
confirmButton = {
Button(onClick = {
inputHeight.toFloatOrNull()?.let {
onSelectReference(selectedRefObject!!, it)
}
showDialog = false
}) {
Text("Calculate")
}
}
)
}
Column(modifier = Modifier.fillMaxSize()) {
Box(modifier = Modifier.weight(1f)) {
Image(
bitmap = captureData.image.asImageBitmap(),
contentDescription = "Captured Image",
modifier = Modifier.fillMaxSize(),
contentScale = ContentScale.Fit
)
realWorldMetrics?.let { metrics ->
Column(
modifier = Modifier
.align(Alignment.TopStart)
.padding(16.dp)
.background(Color.Black.copy(alpha = 0.7f), RoundedCornerShape(8.dp))
.padding(8.dp)
) {
Text("Height: %.2f %s".format(metrics.height, metrics.unit), color = Color.White)
Text("Width: %.2f %s".format(metrics.width, metrics.unit), color = Color.White)
Text("Distance: %.2f".format(metrics.distance), color = Color.White)
}
}
}
Row(
modifier = Modifier
.fillMaxWidth()
.padding(16.dp),
horizontalArrangement = Arrangement.SpaceBetween
) {
Button(onClick = onRetake) {
Icon(Icons.Default.Refresh, contentDescription = null)
Spacer(Modifier.width(8.dp))
Text("Retake")
}
if (captureData.referenceObjects.isNotEmpty()) {
Button(onClick = {
selectedRefObject = captureData.referenceObjects.first()
showDialog = true
}) {
Text("Select Ref Object")
}
}
}
}
}
fun rotateBitmap(bitmap: Bitmap, degrees: Int): Bitmap {
val matrix = Matrix()
matrix.postRotate(degrees.toFloat())
return Bitmap.createBitmap(bitmap, 0, 0, bitmap.width, bitmap.height, matrix, true)
}

View File

@ -1,210 +1,19 @@
// Obsolete file, replaced by CameraCaptureScreen.kt
// This file is kept to avoid breaking changes if referenced elsewhere, but the content is commented out to resolve errors.
// TODO: Migrate completely to CameraCaptureScreen or new CameraViewModel structure.
package com.example.livingai.pages.camera
import android.content.pm.ActivityInfo
import androidx.camera.core.ImageCapture
import androidx.camera.core.ImageCaptureException
import androidx.camera.core.ImageProxy
import androidx.camera.view.LifecycleCameraController
import androidx.compose.foundation.Image
import androidx.compose.foundation.background
import androidx.compose.foundation.border
import androidx.compose.foundation.layout.Box
import androidx.compose.foundation.layout.BoxWithConstraints
import androidx.compose.foundation.layout.Column
import androidx.compose.foundation.layout.fillMaxSize
import androidx.compose.foundation.layout.offset
import androidx.compose.foundation.layout.padding
import androidx.compose.foundation.layout.size
import androidx.compose.foundation.shape.RoundedCornerShape
import androidx.compose.material.icons.Icons
import androidx.compose.material.icons.filled.Camera
import androidx.compose.material3.CircularProgressIndicator
import androidx.compose.material3.FabPosition
import androidx.compose.material3.FloatingActionButton
import androidx.compose.material3.Icon
import androidx.compose.material3.Scaffold
import androidx.compose.material3.Text
import androidx.compose.runtime.Composable
import androidx.compose.runtime.LaunchedEffect
import androidx.compose.runtime.collectAsState
import androidx.compose.runtime.getValue
import androidx.compose.runtime.remember
import androidx.compose.ui.Alignment
import androidx.compose.ui.Modifier
import androidx.compose.ui.graphics.Color
import androidx.compose.ui.graphics.asImageBitmap
import androidx.compose.ui.layout.ContentScale
import androidx.compose.ui.platform.LocalContext
import androidx.compose.ui.platform.LocalDensity
import androidx.compose.ui.unit.dp
import androidx.navigation.NavController
import androidx.core.content.ContextCompat
import com.example.livingai.pages.components.CameraPreview
import com.example.livingai.pages.components.PermissionWrapper
import com.example.livingai.pages.navigation.Route
import com.example.livingai.utils.SetScreenOrientation
import org.koin.androidx.compose.koinViewModel
@Composable
fun CameraScreen(
-viewModel: CameraViewModel = koinViewModel(),
+// viewModel: CameraViewModel = koinViewModel(), // Commented out to fix build errors
navController: NavController,
orientation: String? = null,
animalId: String
) {
-val isLandscape = when (orientation) {
+// Placeholder content
"front", "back" -> false
else -> true
}
val orientationLock = if (isLandscape) {
ActivityInfo.SCREEN_ORIENTATION_LANDSCAPE
} else {
ActivityInfo.SCREEN_ORIENTATION_PORTRAIT
}
SetScreenOrientation(orientationLock)
LaunchedEffect(animalId, orientation) {
viewModel.onEvent(CameraEvent.SetContext(animalId, orientation))
}
PermissionWrapper {
val state by viewModel.state.collectAsState()
val context = LocalContext.current
val density = LocalDensity.current
val controller = remember {
LifecycleCameraController(context).apply {
setEnabledUseCases(LifecycleCameraController.IMAGE_ANALYSIS or LifecycleCameraController.IMAGE_CAPTURE)
}
}
fun takePhoto() {
val executor = ContextCompat.getMainExecutor(context)
controller.takePicture(
executor,
object : ImageCapture.OnImageCapturedCallback() {
override fun onCaptureSuccess(image: ImageProxy) {
viewModel.onEvent(CameraEvent.ImageCaptured(image))
}
override fun onError(exception: ImageCaptureException) {}
}
)
}
LaunchedEffect(state.capturedImageUri) {
state.capturedImageUri?.let {
navController.navigate(Route.ViewImageScreen(it.toString(), true, orientation, true, false, animalId))
viewModel.onEvent(CameraEvent.ClearCapturedImage)
}
}
Scaffold(
floatingActionButton = {
FloatingActionButton(onClick = ::takePhoto) {
Icon(Icons.Default.Camera, contentDescription = "Capture Image")
}
},
floatingActionButtonPosition = FabPosition.Center
) { paddingValues ->
BoxWithConstraints(modifier = Modifier.fillMaxSize().padding(paddingValues)) {
val screenWidth = maxWidth
val screenHeight = maxHeight
CameraPreview(
modifier = Modifier.fillMaxSize(),
controller = controller,
onFrame = { bitmap, rotation, fxPixels ->
viewModel.onEvent(CameraEvent.FrameReceived(bitmap, rotation, fxPixels))
}
)
state.detectionResult?.let { detection ->
val imageWidth = state.imageWidth.toFloat()
val imageHeight = state.imageHeight.toFloat()
if (imageWidth == 0f || imageHeight == 0f) return@let
val screenW = with(density) { screenWidth.toPx() }
val screenH = with(density) { screenHeight.toPx() }
val scaleX = screenW / imageWidth
val scaleY = screenH / imageHeight
val scale = maxOf(scaleX, scaleY) // For FILL_CENTER behavior
val offsetX = (screenW - imageWidth * scale) / 2f
val offsetY = (screenH - imageHeight * scale) / 2f
val bbox = detection.boundingBox
val left = bbox.left * scale + offsetX
val top = bbox.top * scale + offsetY
val leftDp = with(density) { left.toDp() }
val topDp = with(density) { top.toDp() }
val widthDp = with(density) { (bbox.width() * scale).toDp() }
val heightDp = with(density) { (bbox.height() * scale).toDp() }
Box(
modifier = Modifier
.offset(x = leftDp, y = topDp)
.size(width = widthDp, height = heightDp)
.border(2.dp, Color.Yellow)
)
// Overlay 1: Object Label & Confidence (Above the box)
Column(
modifier = Modifier
.offset(x = leftDp, y = topDp - 25.dp)
.background(Color.Black.copy(alpha = 0.7f))
.padding(4.dp)
) {
Text(
text = "${detection.label} (${(detection.confidence * 100).toInt()}%)",
color = Color.White
)
}
}
// Overlay 2: Fixed top-right corner info
state.orientationState?.let { orient ->
Column(
modifier = Modifier
.align(Alignment.TopEnd)
.padding(16.dp)
.background(Color.Black.copy(alpha = 0.7f), shape = RoundedCornerShape(8.dp))
.padding(8.dp)
) {
if (orient.relativeDepth != null) {
Text(
text = "Rel Depth: %.2f".format(orient.relativeDepth),
color = Color.Cyan
)
}
if (orient.absoluteDistanceMeters != null) {
Text(
text = "Dist: %.2fm".format(orient.absoluteDistanceMeters),
color = Color.Green
)
}
if (orient.iouScore != null) {
Text(
text = "IoU: %.2f".format(orient.iouScore),
color = Color.Yellow
)
}
orient.pixelMetrics?.let { metrics ->
Text(
text = "W: ${metrics.widthPx}px H: ${metrics.heightPx}px",
color = Color.White
)
}
}
}
if (state.isCapturing) {
CircularProgressIndicator(modifier = Modifier.align(Alignment.Center))
}
}
}
}
}

View File

@ -1,181 +1,172 @@
-package com.example.livingai.pages.camera
-import android.graphics.Bitmap
-import android.graphics.Matrix
-import android.net.Uri
-import androidx.camera.core.ImageProxy
-import androidx.lifecycle.ViewModel
-import androidx.lifecycle.viewModelScope
-import com.example.livingai.data.ml.ObjectDetectionResult
-import com.example.livingai.domain.ml.AIModel
-import com.example.livingai.domain.ml.Orientation
-import com.example.livingai.domain.ml.OrientationState
-import com.example.livingai.domain.repository.CameraRepository
-import com.example.livingai.domain.usecases.AppDataUseCases
-import com.example.livingai.utils.ScreenDimensions
-import com.example.livingai.utils.SilhouetteManager
-import com.example.livingai.utils.calculateDistance
-import com.example.livingai.utils.fitImageToCrop
-import kotlinx.coroutines.flow.MutableStateFlow
-import kotlinx.coroutines.flow.asStateFlow
-import kotlinx.coroutines.launch
-import java.util.concurrent.atomic.AtomicBoolean
-import kotlin.math.roundToInt
-class CameraViewModel(
-private val cameraRepository: CameraRepository,
-private val aiModel: AIModel,
-private val screenDims: ScreenDimensions,
-private val appDataUseCases: AppDataUseCases
-) : ViewModel() {
-private val _state = MutableStateFlow(CameraUiState())
-val state = _state.asStateFlow()
-private val isProcessingFrame = AtomicBoolean(false)
-init {
-viewModelScope.launch {
-appDataUseCases.getSettings().collect { settings ->
-_state.value = _state.value.copy(
-isAutoCaptureEnabled = settings.isAutoCaptureOn,
-matchThreshold = settings.jaccardThreshold.roundToInt(),
-distanceMethod = settings.distanceMethod
-)
-}
-}
-}
-fun onEvent(event: CameraEvent) {
-when (event) {
-is CameraEvent.ImageCaptured -> handleImageProxy(event.imageProxy)
-is CameraEvent.FrameReceived -> handleFrame(event.bitmap, event.rotationDegrees, event.focalLengthPixels)
-is CameraEvent.ClearCapturedImage -> clearCaptured()
-is CameraEvent.SetContext -> setContext(event.animalId, event.orientation)
-is CameraEvent.AutoCaptureTriggered -> {
-_state.value = _state.value.copy(shouldAutoCapture = false, isCapturing = true)
-}
-}
-}
-private fun setContext(animalId: String, orientation: String?) {
-val silhouetteMask = orientation?.let { SilhouetteManager.getOriginal(it) }
-val savedMask = orientation?.let { SilhouetteManager.getInvertedPurple(it) }
-_state.value = _state.value.copy(
-animalId = animalId,
-orientation = orientation,
-silhouetteMask = silhouetteMask,
-savedMaskBitmap = savedMask
-)
-}
-private fun clearCaptured() {
-_state.value = _state.value.copy(
-capturedImageUri = null,
-segmentationMask = null,
-detectionResult = null // Clear detection result as well
-)
-}
-private fun handleImageProxy(proxy: ImageProxy) {
-viewModelScope.launch {
-val bitmap = cameraRepository.captureImage(proxy)
-val animalId = _state.value.animalId ?: "unknown"
-val uriString = cameraRepository.saveImage(bitmap, animalId, _state.value.orientation)
-_state.value = _state.value.copy(
-capturedImageUri = Uri.parse(uriString),
-isCapturing = false // Reset capturing flag
-)
-}
-}
-private fun handleFrame(bitmap: Bitmap, rotationDegrees: Int, focalLengthPixels: Float) {
-if (isProcessingFrame.compareAndSet(false, true)) {
-viewModelScope.launch {
-try {
-// Rotate bitmap to be upright before processing
-val rotatedBitmap = if (rotationDegrees != 0) {
-val matrix = Matrix().apply { postRotate(rotationDegrees.toFloat()) }
-Bitmap.createBitmap(bitmap, 0, 0, bitmap.width, bitmap.height, matrix, true)
-} else {
-bitmap
-}
-// Perform Object Detection
-val detectionResult = aiModel.detectObject(rotatedBitmap)
-var orientationState: OrientationState? = null
-val requestedOrientationStr = _state.value.orientation
-if (requestedOrientationStr != null && detectionResult != null) {
-// We need a silhouette bitmap for processFrame. If not available, we can pass a dummy or handle inside.
-// But for now, let's use the one we loaded in setContext
-val silhouette = _state.value.silhouetteMask
-if (silhouette != null) {
-orientationState = cameraRepository.processFrame(
-bitmap = rotatedBitmap,
-requestedOrientation = mapStringToOrientation(requestedOrientationStr),
-silhouetteBitmap = silhouette,
-realObjectHeightMeters = null, // Or some default
-focalLengthPixels = focalLengthPixels,
-boundingBox = detectionResult.boundingBox // Pass the bbox we just found
-)
-}
-}
-_state.value = _state.value.copy(
-detectionResult = detectionResult,
-orientationState = orientationState, // Update state
-imageWidth = rotatedBitmap.width,
-imageHeight = rotatedBitmap.height
-)
-} catch (e: Exception) {
-e.printStackTrace()
-} finally {
-isProcessingFrame.set(false)
-}
-}
-}
-}
-private fun mapStringToOrientation(orientation: String): Orientation {
-return when (orientation.lowercase()) {
-"front" -> Orientation.FRONT
-"back" -> Orientation.BACK
-"left" -> Orientation.LEFT
-"right" -> Orientation.RIGHT
-"leftangle" -> Orientation.LEFT_45
-"rightangle" -> Orientation.RIGHT_45
-else -> Orientation.FRONT
-}
-}
-}
-data class CameraUiState(
-val animalId: String? = null,
-val orientation: String? = null,
-val capturedImageUri: Uri? = null,
-val segmentationMask: Bitmap? = null,
-val savedMaskBitmap: Bitmap? = null,
-val silhouetteMask: Bitmap? = null,
-val isCapturing: Boolean = false,
-val isAutoCaptureEnabled: Boolean = false,
-val matchThreshold: Int = 50,
-val distanceMethod: String = "Jaccard",
-val shouldAutoCapture: Boolean = false,
-val orientationState: OrientationState? = null,
-val detectionResult: ObjectDetectionResult? = null,
-val imageWidth: Int = 0,
-val imageHeight: Int = 0
-)
-sealed class CameraEvent {
-data class ImageCaptured(val imageProxy: ImageProxy) : CameraEvent()
-data class FrameReceived(val bitmap: Bitmap, val rotationDegrees: Int, val focalLengthPixels: Float) : CameraEvent()
-object ClearCapturedImage : CameraEvent()
-data class SetContext(val animalId: String, val orientation: String?) : CameraEvent()
-object AutoCaptureTriggered : CameraEvent()
-}
+package com.example.livingai.pages.camera
+import android.graphics.Bitmap
+import androidx.lifecycle.ViewModel
+import androidx.lifecycle.viewModelScope
+import com.example.livingai.domain.camera.*
+import com.example.livingai.domain.model.camera.*
+import com.example.livingai.utils.TiltSensorManager
+import kotlinx.coroutines.flow.MutableStateFlow
+import kotlinx.coroutines.flow.StateFlow
+import kotlinx.coroutines.flow.asStateFlow
+import kotlinx.coroutines.launch
+class CameraViewModel(
+private val orientationChecker: OrientationChecker,
+private val tiltChecker: TiltChecker,
+private val objectDetector: ObjectDetector,
+private val poseAnalyzer: PoseAnalyzer,
+private val captureHandler: CaptureHandler,
+private val measurementCalculator: MeasurementCalculator,
+private val tiltSensorManager: TiltSensorManager
+) : ViewModel() {
+private val _uiState = MutableStateFlow(CameraUiState())
+val uiState: StateFlow<CameraUiState> = _uiState.asStateFlow()
+private val tilt = tiltSensorManager.tilt
+init {
+tiltSensorManager.start()
+}
+override fun onCleared() {
+super.onCleared()
+tiltSensorManager.stop()
+}
+fun setRequiredOrientation(orientation: String) {
+val required = when (orientation.lowercase()) {
+"front", "back" -> CameraOrientation.PORTRAIT
+else -> CameraOrientation.LANDSCAPE
+}
+_uiState.value = _uiState.value.copy(
+requiredOrientation = required,
+targetOrientation = orientation
+)
+}
+fun processFrame(
+image: Bitmap,
+deviceOrientation: Int
+) {
+viewModelScope.launch {
+val currentTilt = tilt.value
+val input = PipelineInput(
+image = image,
+deviceOrientation = deviceOrientation,
+deviceRoll = currentTilt.second,
+devicePitch = currentTilt.first,
+deviceAzimuth = currentTilt.third,
+requiredOrientation = _uiState.value.requiredOrientation,
+targetAnimal = "Cow", // Assuming Cow for now, can be parameter later
+orientation = _uiState.value.targetOrientation,
+previousDetectionResult = _uiState.value.lastDetectionResult
+)
+// Step 1: Check Orientation
+val orientationInstruction = orientationChecker.analyze(input)
+if (!orientationInstruction.isValid) {
+updateState(orientationInstruction)
+return@launch
+}
+// Step 2: Check Tilt
+val tiltInstruction = tiltChecker.analyze(input)
+if (!tiltInstruction.isValid) {
+updateState(tiltInstruction)
+return@launch
+}
+// Step 3: Detect Objects
+val detectionInstruction = objectDetector.analyze(input)
+if (!detectionInstruction.isValid) {
+updateState(detectionInstruction)
+return@launch
+}
+// Step 4: Check Pose (Silhouette matching)
+val poseInstruction = poseAnalyzer.analyze(input.copy(previousDetectionResult = detectionInstruction.result as? DetectionResult))
+if (!poseInstruction.isValid) {
+updateState(poseInstruction)
+return@launch
+}
+// All checks passed
+_uiState.value = _uiState.value.copy(
+currentInstruction = Instruction("Ready to capture", isValid = true),
+isReadyToCapture = true,
+lastDetectionResult = detectionInstruction.result as? DetectionResult
+)
+}
+}
+private fun updateState(instruction: Instruction) {
+val detectionResult = instruction.result as? DetectionResult
+_uiState.value = _uiState.value.copy(
+currentInstruction = instruction,
+isReadyToCapture = false,
+lastDetectionResult = detectionResult ?: _uiState.value.lastDetectionResult
+)
+}
+fun onCaptureClicked(image: Bitmap, deviceOrientation: Int) {
+viewModelScope.launch {
+val detectionResult = _uiState.value.lastDetectionResult ?: return@launch
+val currentTilt = tilt.value
+val input = PipelineInput(
+image = image,
+deviceOrientation = deviceOrientation,
+deviceRoll = currentTilt.second,
+devicePitch = currentTilt.first,
+deviceAzimuth = currentTilt.third,
+requiredOrientation = _uiState.value.requiredOrientation,
+targetAnimal = "Cow",
+orientation = _uiState.value.targetOrientation
+)
+val captureData = captureHandler.capture(input, detectionResult)
+_uiState.value = _uiState.value.copy(
+captureData = captureData,
+isPreviewMode = true
+)
+}
+}
+fun onReferenceObjectSelected(refObj: ReferenceObject, realHeight: Float) {
+val captureData = _uiState.value.captureData ?: return
+val realMetrics = measurementCalculator.calculateRealMetrics(
+targetHeight = realHeight,
+referenceObject = refObj,
+currentMetrics = captureData.animalMetrics
+)
+_uiState.value = _uiState.value.copy(
+realWorldMetrics = realMetrics
+)
+}
+fun resetCamera() {
+_uiState.value = _uiState.value.copy(
+isPreviewMode = false,
+captureData = null,
+realWorldMetrics = null,
+isReadyToCapture = false,
+currentInstruction = null
+)
+}
+}
+data class CameraUiState(
+val requiredOrientation: CameraOrientation = CameraOrientation.LANDSCAPE,
+val targetOrientation: String = "side",
+val currentInstruction: Instruction? = null,
+val isReadyToCapture: Boolean = false,
+val lastDetectionResult: DetectionResult? = null,
+val isPreviewMode: Boolean = false,
+val captureData: CaptureData? = null,
+val realWorldMetrics: RealWorldMetrics? = null
+)

View File

@ -14,8 +14,13 @@ import androidx.compose.material3.ExperimentalMaterial3Api
import androidx.compose.material3.MaterialTheme
import androidx.compose.material3.Text
import androidx.compose.runtime.Composable
import androidx.compose.runtime.getValue
import androidx.compose.runtime.mutableStateOf
import androidx.compose.runtime.remember
import androidx.compose.runtime.setValue
import androidx.compose.ui.Alignment
import androidx.compose.ui.Modifier
import androidx.compose.ui.platform.LocalContext
import androidx.compose.ui.res.painterResource
import androidx.compose.ui.res.stringResource
import androidx.compose.ui.text.font.FontWeight
@ -23,11 +28,27 @@ import androidx.navigation.NavController
import com.example.livingai.R
import com.example.livingai.pages.commons.Dimentions
import com.example.livingai.pages.components.CommonScaffold
import com.example.livingai.pages.components.LabeledDropdown
import com.example.livingai.pages.navigation.Route
import com.example.livingai.utils.Constants
@OptIn(ExperimentalMaterial3Api::class)
@Composable
fun HomeScreen(navController: NavController) {
val context = LocalContext.current
val silhouetteMap = remember {
Constants.silhouetteList.associateWith { item ->
val resId = context.resources.getIdentifier("label_${item}", "string", context.packageName)
if (resId != 0) context.getString(resId) else item
}
}
// Reverse map for lookup (Display Name -> ID)
val displayToIdMap = remember { silhouetteMap.entries.associate { (k, v) -> v to k } }
val orientationOptions = remember { silhouetteMap.values.toList() }
var selectedOrientationDisplay by remember { mutableStateOf(orientationOptions.firstOrNull() ?: "") }
CommonScaffold(
navController = navController,
title = stringResource(id = R.string.app_name),
@ -64,6 +85,25 @@ fun HomeScreen(navController: NavController) {
onClick = { navController.navigate(Route.AddProfileScreen()) }
)
Spacer(modifier = Modifier.height(Dimentions.SMALL_PADDING))
// Dropdown for selecting orientation
LabeledDropdown(
labelRes = R.string.default_orientation_label, // Or create a generic "Orientation" label
options = orientationOptions,
selected = selectedOrientationDisplay,
onSelected = { selectedOrientationDisplay = it },
modifier = Modifier.fillMaxWidth()
)
HomeButton(
text = "Camera Capture",
onClick = {
val orientationId = displayToIdMap[selectedOrientationDisplay] ?: "side"
navController.navigate(Route.CameraScreen(orientation = orientationId))
}
)
}
}
}

View File

@ -0,0 +1,11 @@
package com.example.livingai.pages.imagepreview
import androidx.compose.runtime.Composable
import org.koin.androidx.compose.koinViewModel
@Composable
fun ImagePreviewScreen(
viewModel: ImagePreviewViewModel = koinViewModel()
) {
}

View File

@ -0,0 +1,5 @@
package com.example.livingai.pages.imagepreview
import androidx.lifecycle.ViewModel
class ImagePreviewViewModel : ViewModel()

View File

@ -10,7 +10,7 @@ import androidx.navigation.compose.rememberNavController
import androidx.navigation.toRoute
import com.example.livingai.pages.addprofile.AddProfileScreen
import com.example.livingai.pages.addprofile.AddProfileViewModel
-import com.example.livingai.pages.camera.CameraScreen
+import com.example.livingai.pages.camera.CameraCaptureScreen
import com.example.livingai.pages.camera.VideoRecordScreen
import com.example.livingai.pages.camera.ViewImageScreen
import com.example.livingai.pages.camera.ViewVideoScreen
@ -59,9 +59,6 @@ fun NavGraph(
val currentId by viewModel.currentAnimalId
val videoUri by viewModel.videoUri
// Note: initialization is handled in ViewModel init block using SavedStateHandle
// Handle new media from saved state handle
val newImageUri = backStackEntry.savedStateHandle.get<String>("newImageUri")
val newImageOrientation = backStackEntry.savedStateHandle.get<String>("newImageOrientation")
val newVideoUri = backStackEntry.savedStateHandle.get<String>("newVideoUri")
@ -101,7 +98,7 @@ fun NavGraph(
animalId = currentId ?: "unknown"
))
} else {
-navController.navigate(Route.CameraScreen(orientation = orientation, animalId = currentId ?: "unknown"))
+// navController.navigate(Route.CameraScreen(orientation = orientation, animalId = currentId ?: "unknown")) // Commented until existing camera flow is restored or migrated
}
},
onTakeVideo = {
@ -129,7 +126,7 @@ fun NavGraph(
composable<Route.CameraScreen> { backStackEntry ->
val route: Route.CameraScreen = backStackEntry.toRoute()
-CameraScreen(navController = navController, orientation = route.orientation, animalId = route.animalId)
+CameraCaptureScreen(navController = navController, orientation = route.orientation)
}
composable<Route.VideoRecordScreen> { backStackEntry ->
@ -146,7 +143,7 @@ fun NavGraph(
showBack = args.showBack,
onRetake = {
navController.popBackStack()
-navController.navigate(Route.CameraScreen(orientation = args.orientation, animalId = args.animalId))
+// navController.navigate(Route.CameraScreen(...))
},
onAccept = {
navController.getBackStackEntry<Route.AddProfileScreen>().savedStateHandle["newImageUri"] = args.imageUri

View File

@ -19,7 +19,9 @@ sealed class Route {
@Serializable
data class RatingScreen(val animalId: String) : Route()
@Serializable
-data class CameraScreen(val orientation: String? = null, val animalId: String) : Route()
+data class CameraScreen(val orientation: String) : Route()
@Serializable
data class OldCameraScreen(val orientation: String? = null, val animalId: String) : Route()
@Serializable
data class VideoRecordScreen(val animalId: String) : Route()
@Serializable

View File

@ -0,0 +1,11 @@
package com.example.livingai.pages.videopreview
import androidx.compose.runtime.Composable
import org.koin.androidx.compose.koinViewModel
@Composable
fun VideoPreviewScreen(
viewModel: VideoPreviewViewModel = koinViewModel()
) {
}

View File

@ -0,0 +1,5 @@
package com.example.livingai.pages.videopreview
import androidx.lifecycle.ViewModel
class VideoPreviewViewModel : ViewModel()

View File

@ -3,18 +3,25 @@ package com.example.livingai.utils
import android.content.Context
import android.graphics.Bitmap
import android.graphics.BitmapFactory
import android.graphics.Canvas
import android.graphics.Color
import android.util.Log
import com.example.livingai.R
import java.util.concurrent.ConcurrentHashMap
import kotlin.math.min
object SilhouetteManager {
private val originals = ConcurrentHashMap<String, Bitmap>()
private val invertedPurple = ConcurrentHashMap<String, Bitmap>()
private val weightedMasks = ConcurrentHashMap<String, Bitmap>()
fun getOriginal(name: String): Bitmap? = originals[name]
fun getInvertedPurple(name: String): Bitmap? = invertedPurple[name]
fun getWeightedMask(name: String): Bitmap? = weightedMasks[name]
fun initialize(context: Context, width: Int, height: Int) {
val resources = context.resources
val silhouetteList = mapOf(
"front" to R.drawable.front_silhouette,
"back" to R.drawable.back_silhouette,
@ -25,22 +32,27 @@ object SilhouetteManager {
"angleview" to R.drawable.angleview_silhouette "angleview" to R.drawable.angleview_silhouette
) )
silhouetteList.entries.toList().forEach { (name, resId) -> silhouetteList.forEach { (name, resId) ->
val bmp = BitmapFactory.decodeResource(resources, resId) val bmp = BitmapFactory.decodeResource(resources, resId)
originals[name] = bmp originals[name] = bmp
Log.d("Silhouette", "Dims: ${width} x ${height}")
if (name == "front" || name == "back")
invertedPurple[name] = createInvertedPurpleBitmap(bmp, width, height)
else
invertedPurple[name] = createInvertedPurpleBitmap(bmp, height, width)
Log.d("Silhouette", "Dims Mask: ${invertedPurple[name]?.width} x ${invertedPurple[name]?.height}") // Fit image appropriately (front/back = W/H, others rotated)
val fitted = if (name == "front" || name == "back")
createInvertedPurpleBitmap(bmp, width, height)
else
createInvertedPurpleBitmap(bmp, height, width)
invertedPurple[name] = fitted
weightedMasks[name] = createSignedWeightedMask(fitted, fadeInside = 10, fadeOutside = 20)
Log.d("Silhouette", "Loaded mask: $name (${fitted.width} x ${fitted.height})")
}
}
// ------------------------------------------------------------------------
// STEP 1: Create "inverted purple" mask (transparent object becomes purple)
// ------------------------------------------------------------------------
private fun createInvertedPurpleBitmap(
src: Bitmap,
@ -48,11 +60,11 @@ object SilhouetteManager {
targetHeight: Int
): Bitmap {
val w = src.width
val h = src.height
val pixels = IntArray(w * h)
src.getPixels(pixels, 0, w, 0, 0, w, h)
val purple = Color.argb(255, 128, 0, 128)
@ -61,9 +73,124 @@ object SilhouetteManager {
pixels[i] = if (alpha == 0) purple else 0x00000000
}
val inverted = Bitmap.createBitmap(pixels, w, h, Bitmap.Config.ARGB_8888)
return fitCenterToScreen(inverted, targetWidth, targetHeight)
}
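// fitCenterToScreen is defined elsewhere (not shown in this diff). A plausible sketch of the
// intended behaviour, kept commented out because it is an assumption rather than the author's
// implementation: scale uniformly to fit the target size, center the result, and leave the
// letterbox area transparent.
// private fun fitCenterToScreen(src: Bitmap, targetWidth: Int, targetHeight: Int): Bitmap {
//     val scale = min(targetWidth.toFloat() / src.width, targetHeight.toFloat() / src.height)
//     val scaledW = (src.width * scale).toInt().coerceAtLeast(1)
//     val scaledH = (src.height * scale).toInt().coerceAtLeast(1)
//     val scaled = Bitmap.createScaledBitmap(src, scaledW, scaledH, true)
//     val out = Bitmap.createBitmap(targetWidth, targetHeight, Bitmap.Config.ARGB_8888)
//     Canvas(out).drawBitmap(scaled, (targetWidth - scaledW) / 2f, (targetHeight - scaledH) / 2f, null)
//     return out
// }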
// ------------------------------------------------------------------------
// STEP 2: Create signed weighted mask (-1 to 1)
// ------------------------------------------------------------------------
private fun createSignedWeightedMask(
bitmap: Bitmap,
fadeInside: Int = 10,
fadeOutside: Int = 20
): Bitmap {
val w = bitmap.width
val h = bitmap.height
val pixels = IntArray(w * h)
bitmap.getPixels(pixels, 0, w, 0, 0, w, h)
fun idx(x: Int, y: Int) = y * w + x
// inside = 1 → transparent pixels of the fitted mask (alpha == 0)
// inside = 0 → purple (opaque) pixels
val inside = IntArray(w * h)
for (i in pixels.indices) {
val alpha = pixels[i] ushr 24
inside[i] = if (alpha == 0) 1 else 0
}
// --------------------------------------------------------------------
// DISTANCES FOR INSIDE PIXELS (to nearest OUTSIDE pixel)
// --------------------------------------------------------------------
val distInside = IntArray(w * h) { w + h } // cap at an unreachable L1 distance so the +1 below cannot overflow Int
for (i in inside.indices) {
if (inside[i] == 0) distInside[i] = 0
}
// forward
for (y in 0 until h) {
for (x in 0 until w) {
val i = idx(x, y)
var best = distInside[i]
if (x > 0) best = min(best, distInside[idx(x - 1, y)] + 1)
if (y > 0) best = min(best, distInside[idx(x, y - 1)] + 1)
distInside[i] = best
}
}
// backward
for (y in h - 1 downTo 0) {
for (x in w - 1 downTo 0) {
val i = idx(x, y)
var best = distInside[i]
if (x < w - 1) best = min(best, distInside[idx(x + 1, y)] + 1)
if (y < h - 1) best = min(best, distInside[idx(x, y + 1)] + 1)
distInside[i] = best
}
}
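// The forward and backward raster sweeps above are the classic two-pass (Rosenfeld-Pfaltz)
// distance transform with 4-connectivity: after both passes, distInside holds the city-block
// (L1) distance from each inside pixel to the nearest outside pixel.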
// --------------------------------------------------------------------
// DISTANCES FOR OUTSIDE PIXELS (to nearest INSIDE pixel)
// --------------------------------------------------------------------
val distOutside = IntArray(w * h) { w + h } // cap at an unreachable L1 distance so the +1 below cannot overflow Int
for (i in inside.indices) {
if (inside[i] == 1) distOutside[i] = 0
}
// forward
for (y in 0 until h) {
for (x in 0 until w) {
val i = idx(x, y)
var best = distOutside[i]
if (x > 0) best = min(best, distOutside[idx(x - 1, y)] + 1)
if (y > 0) best = min(best, distOutside[idx(x, y - 1)] + 1)
distOutside[i] = best
}
}
// backward
for (y in h - 1 downTo 0) {
for (x in w - 1 downTo 0) {
val i = idx(x, y)
var best = distOutside[i]
if (x < w - 1) best = min(best, distOutside[idx(x + 1, y)] + 1)
if (y < h - 1) best = min(best, distOutside[idx(x, y + 1)] + 1)
distOutside[i] = best
}
}
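// Same two-pass sweep, this time seeded from the inside pixels, so distOutside holds the
// city-block (L1) distance from each outside pixel to the nearest inside pixel.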
// --------------------------------------------------------------------
// BUILD FINAL SIGNED MASK (-1 to +1)
// --------------------------------------------------------------------
val out = Bitmap.createBitmap(w, h, Bitmap.Config.ARGB_8888)
val outPixels = IntArray(w * h)
for (i in outPixels.indices) {
val weight: Float = if (inside[i] == 1) {
// Inside silhouette: +1 to 0
val d = distInside[i]
if (d >= fadeInside) 1f else d.toFloat() / fadeInside
} else {
// Outside: 0 to -1
val d = distOutside[i]
val neg = -(d.toFloat() / fadeOutside)
neg.coerceAtLeast(-1f)
}
// Convert -1..1 → grayscale for debugging
val gray = (((weight + 1f) / 2f) * 255).toInt().coerceIn(0, 255)
outPixels[i] = Color.argb(255, gray, gray, gray)
}
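// Encoding note: the weight in -1..+1 is mapped linearly to gray 0..255, so ~128 marks the
// silhouette boundary; a consumer can approximately recover the weight as gray / 127.5f - 1f.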
out.setPixels(outPixels, 0, w, 0, 0, w, h)
return out
}
}

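The signed mask above stores a per-pixel weight in [-1, +1] as grayscale. One plausible consumer, sketched here under assumptions that are not part of this commit (a binary subject mask that has already been scaled to the weighted mask's size), averages the weights under the segmented subject to get a framing score:

import android.graphics.Bitmap

// Hypothetical sketch: pixels well inside the silhouette contribute up to +1, pixels
// outside subtract up to -1, so a well-framed subject averages close to +1.
fun scoreSubjectAgainstSilhouette(subjectMask: Bitmap, name: String): Float {
    val weighted = SilhouetteManager.getWeightedMask(name) ?: return 0f
    var sum = 0f
    var count = 0
    for (y in 0 until weighted.height) {
        for (x in 0 until weighted.width) {
            val isSubject = (subjectMask.getPixel(x, y) ushr 24) > 0 // any non-transparent pixel
            if (isSubject) {
                val gray = weighted.getPixel(x, y) and 0xFF
                sum += gray / 127.5f - 1f // decode grayscale back to the signed weight
                count++
            }
        }
    }
    return if (count == 0) 0f else sum / count
}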
View File

@ -5,6 +5,7 @@ import android.hardware.Sensor
import android.hardware.SensorEvent
import android.hardware.SensorEventListener
import android.hardware.SensorManager
import android.util.Log
import kotlinx.coroutines.flow.MutableStateFlow
import kotlinx.coroutines.flow.StateFlow
import kotlinx.coroutines.flow.asStateFlow
@ -19,17 +20,27 @@ class TiltSensorManager(
private val _tilt = MutableStateFlow(Triple(0f, 0f, 0f)) // pitch, roll, azimuth
val tilt: StateFlow<Triple<Float, Float, Float>> = _tilt.asStateFlow()
init {
Log.d("TiltSensorManager", "TiltSensorManager initialized.")
if (rotationVectorSensor == null) {
Log.e("TiltSensorManager", "Rotation Vector Sensor not available on this device.")
}
}
fun start() {
Log.d("TiltSensorManager", "start() called.")
rotationVectorSensor?.let {
val registered = sensorManager.registerListener(
this,
it,
SensorManager.SENSOR_DELAY_UI
)
Log.d("TiltSensorManager", "Listener registration attempted. Success: $registered")
} ?: Log.e("TiltSensorManager", "Cannot start listener, sensor is null.")
}
fun stop() {
Log.d("TiltSensorManager", "stop() called.")
sensorManager.unregisterListener(this)
}
@ -51,8 +62,12 @@ class TiltSensorManager(
val pitch = Math.toDegrees(orientationAngles[1].toDouble()).toFloat()
val roll = Math.toDegrees(orientationAngles[2].toDouble()).toFloat()
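// SensorManager.getOrientation fills orientationAngles as [azimuth, pitch, roll] in radians;
// they are converted to degrees before being published on the flow.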
Log.d("TiltSensor", "Pitch: $pitch, Roll: $roll, Azimuth: $azimuth")
_tilt.value = Triple(pitch, roll, azimuth)
}
override fun onAccuracyChanged(sensor: Sensor?, accuracy: Int) {
Log.d("TiltSensorManager", "Accuracy changed to $accuracy for sensor ${sensor?.name}")
}
}
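For reference, the tilt StateFlow exposed by this class is meant to be collected from UI code; below is a minimal sketch of lifecycle-aware collection in Compose, with everything outside TiltSensorManager assumed rather than taken from this commit.

import androidx.compose.runtime.Composable
import androidx.compose.runtime.DisposableEffect
import androidx.compose.runtime.State
import androidx.lifecycle.compose.collectAsStateWithLifecycle

// Hypothetical usage sketch: register the sensor listener only while the composable is in
// the composition, and expose (pitch, roll, azimuth) as State for the tilt checker to read.
@Composable
fun rememberTilt(tiltSensorManager: TiltSensorManager): State<Triple<Float, Float, Float>> {
    DisposableEffect(tiltSensorManager) {
        tiltSensorManager.start()
        onDispose { tiltSensorManager.stop() }
    }
    return tiltSensorManager.tilt.collectAsStateWithLifecycle()
}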