UI Customization
How to customize the CameraView UI to match your app design.
Prerequisites
This document assumes you have read Basic Usage and already know how to create and initialize CameraViewModel.
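If you need a refresher, creation and initialization look roughly like this (a sketch based on the initialization call used in the React Native section later in this document; the API key and session ID are placeholders):

import PetnowUI

// Create the ViewModel that every example in this document observes
let cameraViewModel = CameraViewModel(
    species: .dog,
    cameraPurpose: .forRegisterFromProfile
)

// Initialize the camera before presenting any of the UI below
Task {
    do {
        try await cameraViewModel.initializeCamera(
            licenseInfo: LicenseInfo(apiKey: "your-api-key", isDebugMode: true),
            initialPosition: .back,
            captureSessionId: "session-123"
        ) { result in
            // Handle the capture result
        }
    } catch {
        // Handle initialization failure
    }
}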
Overview
CameraView provides a pet detection camera UI and can be customized in the following ways:
- Add Overlay UI - Place UI on top of CameraView using ZStack (progress, buttons, guides, etc.)
- Place Guide Above Tracker - Use floatingGuideContent for automatically positioned UI above detected objects
- Fully Custom Implementation - Build from scratch using only CameraViewModel
In most cases, combining methods 1 and 2 is sufficient.
Overlay UI Development Guide
You can place additional UI on top of CameraView using ZStack. Most UI elements like buttons, progress indicators, and status messages are implemented this way.
Basic Pattern
struct CameraScreenView: View {
    @ObservedObject var cameraViewModel: CameraViewModel
    @Environment(\.dismiss) var dismiss

    var body: some View {
        ZStack {
            // Camera view
            CameraView(viewModel: cameraViewModel) {
                EmptyView() // Or floatingGuideContent
            }
            // Additional overlay UI
            VStack {
                HStack {
                    Button("Close") { dismiss() }
                    Spacer()
                }
                Spacer()
                // Guide message and status display
                statusOverlay
            }
            .padding()
        }
    }

    @ViewBuilder
    private var statusOverlay: some View {
        VStack(spacing: 12) {
            Text(statusMessage)
                .font(.headline)
                .foregroundColor(.white)
                .padding()
                .background(Color.black.opacity(0.7))
                .cornerRadius(12)
            // Show progress only when processing
            if case .processing = cameraViewModel.detectionStatus,
               cameraViewModel.currentDetectionProgress > 0 {
                ProgressView(value: Double(cameraViewModel.currentDetectionProgress) / 100.0)
                    .progressViewStyle(LinearProgressViewStyle(tint: .white))
                    .frame(maxWidth: 300)
                Text("\(cameraViewModel.currentDetectionProgress)%")
                    .font(.caption)
                    .foregroundColor(.white.opacity(0.8))
            }
        }
    }

    private var statusMessage: String {
        switch cameraViewModel.detectionStatus {
        case .noObject:
            return "Frame your pet in the screen"
        case .processing:
            return "Detecting..."
        case .detected:
            return "Perfect! Please wait"
        case .failed(let reason):
            return failureMessage(for: reason)
        }
    }

    private func failureMessage(for reason: DetectionFailureReason) -> String {
        switch reason {
        case .tooFarAway: return "Please move closer"
        case .tooClose: return "Too close"
        case .tooBright: return "Too bright"
        case .tooDark: return "Poor lighting"
        case .tooBlurred: return "Shake detected"
        default: return "Please try again"
        }
    }
}

Key Points:
- Wrap CameraView with ZStack to freely place UI
- Observe ViewModel state with @ObservedObject to update UI dynamically
- Use VStack/HStack to place UI at top/bottom/sides
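For example, the screen above can then be presented from your own flow (a sketch; RootView, the @StateObject ownership, and fullScreenCover are illustrative choices, not SDK requirements):

struct RootView: View {
    // Own the ViewModel for the lifetime of the camera flow
    @StateObject private var cameraViewModel = CameraViewModel(
        species: .dog,
        cameraPurpose: .forRegisterFromProfile
    )
    @State private var showCamera = false

    var body: some View {
        Button("Open Camera") { showCamera = true }
            .fullScreenCover(isPresented: $showCamera) {
                CameraScreenView(cameraViewModel: cameraViewModel)
            }
    }
}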
Example: Status-Based Colors and Icons
You can provide more intuitive feedback by changing colors and icons based on status.
@ViewBuilder
private var statusOverlay: some View {
    HStack(spacing: 12) {
        Image(systemName: statusIcon)
            .font(.title2)
            .foregroundColor(.white)
        VStack(alignment: .leading, spacing: 4) {
            Text(statusMessage)
                .font(.headline)
                .foregroundColor(.white)
            if let subMessage = statusSubMessage {
                Text(subMessage)
                    .font(.caption)
                    .foregroundColor(.white.opacity(0.8))
            }
        }
    }
    .padding()
    .background(statusColor.opacity(0.8))
    .cornerRadius(12)
    .animation(.easeInOut(duration: 0.3), value: cameraViewModel.detectionStatus)
}

private var statusIcon: String {
    switch cameraViewModel.detectionStatus {
    case .noObject: return "viewfinder"
    case .processing: return "camera.metering.center.weighted"
    case .detected: return "checkmark.circle.fill"
    case .failed: return "exclamationmark.triangle.fill"
    }
}

private var statusColor: Color {
    switch cameraViewModel.detectionStatus {
    case .noObject: return .gray
    case .processing: return .blue
    case .detected: return .green
    case .failed: return .red
    }
}

private var statusSubMessage: String? {
    switch cameraViewModel.detectionStatus {
    case .processing:
        return "Stay still and wait"
    default:
        return nil
    }
}

Key Points:
- Provide visual feedback with different icons and colors per status
- Handle state transitions smoothly with the .animation modifier
- Provide additional information with sub-messages
Example: Bounding Box Visualization
If you want to visually emphasize the detected area, you can use detectedObjectNormalizedRect.
import SwiftUI
import PetnowUI

struct CameraWithBoundingBoxView: View {
    @ObservedObject var cameraViewModel: CameraViewModel
    @Environment(\.dismiss) var dismiss

    var body: some View {
        ZStack {
            // Camera view
            CameraView(viewModel: cameraViewModel) {
                EmptyView()
            }
            // Bounding box outline (needs conversion from normalized to pixel coordinates)
            GeometryReader { geometry in
                if let normalizedRect = cameraViewModel.detectedObjectNormalizedRect {
                    let boundingBox = convertToPixelRect(normalizedRect: normalizedRect, viewSize: geometry.size)
                    Rectangle()
                        .stroke(borderColor, lineWidth: 3)
                        .frame(width: boundingBox.width, height: boundingBox.height)
                        .position(x: boundingBox.midX, y: boundingBox.midY)
                        .opacity(0.8)
                        .animation(.easeInOut(duration: 0.3), value: normalizedRect)
                }
            }
            // Top UI
            VStack {
                HStack {
                    Button("Close") { dismiss() }
                        .foregroundColor(.white)
                        .padding()
                    Spacer()
                }
                Spacer()
            }
        }
    }

    private var borderColor: Color {
        switch cameraViewModel.detectionStatus {
        case .detected: return .green
        case .processing: return .yellow
        case .failed: return .red
        default: return .gray
        }
    }

    private func convertToPixelRect(normalizedRect: CGRect, viewSize: CGSize) -> CGRect {
        // Camera ratio (3:4)
        let videoAspectRatio: CGFloat = 3.0 / 4.0
        let scaledHeight = viewSize.height
        let scaledWidth = scaledHeight * videoAspectRatio
        let xOffset = (viewSize.width - scaledWidth) / 2
        let drawingRect = CGRect(x: xOffset, y: 0, width: scaledWidth, height: scaledHeight)
        return CGRect(
            x: drawingRect.origin.x + (normalizedRect.origin.x * drawingRect.width),
            y: drawingRect.origin.y + (normalizedRect.origin.y * drawingRect.height),
            width: normalizedRect.width * drawingRect.width,
            height: normalizedRect.height * drawingRect.height
        )
    }
}

Key Points:
- detectedObjectNormalizedRect uses normalized coordinates (0.0-1.0)
- Convert to pixel coordinates with the convertToPixelRect function before drawing on screen
- Provide intuitive feedback by changing colors based on status
Place UI Above Tracker with floatingGuideContent
We've seen how to place UI at the top/bottom with overlays. Now let's learn how to automatically place UI directly above detected objects (tracker).
How It Works
When you pass a @ViewBuilder closure to the CameraView constructor, it automatically handles:
- Automatic Position Adjustment: Places above detected object (moves down if overlapping)
- Screen Boundary Correction: Automatically clamps to prevent going off-screen
- Center Alignment: Positioned based on bounding box center
This method is useful when you need a guide that follows the detection area.
Example: Basic Text Guide
Let's start with the simplest example.
CameraView(viewModel: cameraViewModel) {
    Text("Center the nose")
        .font(.headline)
        .foregroundColor(.white)
        .padding()
        .background(Color.black.opacity(0.7))
        .cornerRadius(8)
}

Key Points:
- Any SwiftUI View can be used
- Set background opacity appropriately for readability
- Position automatically adjusts to follow detected object
Example: Dynamic Guide by Status
You can display different messages and styles based on status.
CameraView(viewModel: cameraViewModel) {
    guideContent
}

@ViewBuilder
private var guideContent: some View {
    HStack(spacing: 12) {
        Image(systemName: statusIcon)
            .font(.title2)
            .foregroundColor(.white)
        Text(statusMessage)
            .font(.headline)
            .foregroundColor(.white)
    }
    .padding()
    .background(statusColor.opacity(0.8))
    .cornerRadius(12)
    .animation(.easeInOut(duration: 0.3), value: cameraViewModel.detectionStatus)
}

private var statusIcon: String {
    switch cameraViewModel.detectionStatus {
    case .noObject: return "viewfinder"
    case .processing: return "camera.metering.center.weighted"
    case .detected: return "checkmark.circle.fill"
    case .failed: return "exclamationmark.triangle.fill"
    }
}

private var statusMessage: String {
    switch cameraViewModel.detectionStatus {
    case .noObject: return "Searching for pet..."
    case .processing: return "Detecting..."
    case .detected: return "Done!"
    case .failed: return "Retry"
    }
}

private var statusColor: Color {
    switch cameraViewModel.detectionStatus {
    case .noObject: return .gray
    case .processing: return .blue
    case .detected: return .green
    case .failed: return .red
    }
}

Key Points:
- Provide intuitive feedback by changing icons and colors per status
- Handle state transitions smoothly with .animation
- The user's gaze is naturally guided because the UI follows the detection area
Example: Species-Specific Guide
You can display different guides for dogs and cats.
import SwiftUI
import PetnowUI

struct SpeciesGuideView: View {
    @ObservedObject var cameraViewModel: CameraViewModel

    var body: some View {
        CameraView(viewModel: cameraViewModel) {
            VStack(spacing: 12) {
                Image(systemName: speciesIcon)
                    .font(.system(size: 40))
                    .foregroundColor(.white)
                Text(speciesGuide)
                    .font(.headline)
                    .foregroundColor(.white)
                    .multilineTextAlignment(.center)
            }
            .padding()
            .background(speciesColor.opacity(0.8))
            .cornerRadius(16)
        }
    }

    private var speciesIcon: String {
        cameraViewModel.species == .dog ? "pawprint.fill" : "cat.fill"
    }

    private var speciesGuide: String {
        cameraViewModel.species == .dog
            ? "Bring your dog's nose closer"
            : "Face your cat's face forward"
    }

    private var speciesColor: Color {
        cameraViewModel.species == .dog ? .blue : .orange
    }
}

Key Points:
- Check the current species with CameraViewModel.species
- Provide a more intuitive UI with species-appropriate icons, colors, and messages
Combining Overlay and floatingGuideContent
In most cases, the most effective approach is to combine both: overlays for fixed UI at the top and bottom, and floatingGuideContent for UI around the detection area.
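For example (a sketch that combines the patterns from the previous sections; the messages and styling are illustrative):

import SwiftUI
import PetnowUI

struct CombinedCameraScreenView: View {
    @ObservedObject var cameraViewModel: CameraViewModel
    @Environment(\.dismiss) var dismiss

    var body: some View {
        ZStack {
            // floatingGuideContent: follows the detected object automatically
            CameraView(viewModel: cameraViewModel) {
                Text("Center the nose")
                    .font(.headline)
                    .foregroundColor(.white)
                    .padding()
                    .background(Color.black.opacity(0.7))
                    .cornerRadius(8)
            }
            // Overlay: fixed UI at the screen edges
            VStack {
                HStack {
                    Button("Close") { dismiss() }
                        .foregroundColor(.white)
                    Spacer()
                }
                Spacer()
                Text("Hold the camera steady")
                    .font(.caption)
                    .foregroundColor(.white)
                    .padding()
                    .background(Color.black.opacity(0.7))
                    .cornerRadius(8)
            }
            .padding()
        }
    }
}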
Fully Custom UI Implementation
Build UI from scratch using only CameraViewModel, without CameraView. Use this only for React Native or Flutter cross-platform integration, or when a completely custom design is required.
Overlay/floatingGuideContent is sufficient in most cases
This section is for special situations where CameraView cannot be used at all. For SwiftUI apps, consider the previous methods first.
Core Principle
CameraViewModel is UI-independent and provides only two core elements:
- captureSession - AVFoundation session for displaying the camera preview
- @Published properties - Subscribe to detection status, progress, etc.
You can implement fully custom UI with these two elements.
SwiftUI Minimal Implementation
import SwiftUI
import AVFoundation
import PetnowUI

struct MinimalCustomCameraView: View {
    @ObservedObject var viewModel: CameraViewModel

    var body: some View {
        ZStack {
            // 1. Camera preview
            CameraPreviewLayer(session: viewModel.captureSession)
                .edgesIgnoringSafeArea(.all)
            // 2. Status display
            VStack {
                Spacer()
                Text(statusText)
                    .padding()
                    .background(Color.black.opacity(0.7))
                    .foregroundColor(.white)
                    .cornerRadius(8)
            }
        }
    }

    private var statusText: String {
        switch viewModel.detectionStatus {
        case .noObject:
            return "Frame your pet in the screen"
        case .processing:
            return "Detecting... \(viewModel.currentDetectionProgress)%"
        case .detected:
            return "Done!"
        case .failed(let reason):
            return "Failed: \(reason)"
        }
    }
}

// Display an AVCaptureSession in SwiftUI
struct CameraPreviewLayer: UIViewRepresentable {
    let session: AVCaptureSession

    func makeUIView(context: Context) -> UIView {
        let view = UIView()
        let previewLayer = AVCaptureVideoPreviewLayer(session: session)
        previewLayer.videoGravity = .resizeAspectFill
        view.layer.addSublayer(previewLayer)
        // Setup for automatic layout adjustment
        DispatchQueue.main.async {
            previewLayer.frame = view.bounds
        }
        return view
    }

    func updateUIView(_ uiView: UIView, context: Context) {
        if let layer = uiView.layer.sublayers?.first as? AVCaptureVideoPreviewLayer {
            DispatchQueue.main.async {
                layer.frame = uiView.bounds
            }
        }
    }
}

Key Points:
- Wrap captureSession with AVCaptureVideoPreviewLayer to display the camera feed
- Subscribe to @Published properties to react to status changes
- The rest is regular SwiftUI development
UIKit Minimal Implementation
In a UIKit environment, it's even simpler:
import UIKit
import AVFoundation
import PetnowUI
import Combine

class CustomCameraViewController: UIViewController {
    private let viewModel: CameraViewModel
    private var cancellables = Set<AnyCancellable>()
    private let statusLabel = UILabel()

    init(viewModel: CameraViewModel) {
        self.viewModel = viewModel
        super.init(nibName: nil, bundle: nil)
    }

    required init?(coder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    override func viewDidLoad() {
        super.viewDidLoad()
        // 1. Add camera preview layer
        let previewLayer = AVCaptureVideoPreviewLayer(session: viewModel.captureSession)
        previewLayer.frame = view.bounds
        previewLayer.videoGravity = .resizeAspectFill
        view.layer.addSublayer(previewLayer)
        // 2. Setup status label
        statusLabel.textAlignment = .center
        statusLabel.textColor = .white
        statusLabel.backgroundColor = UIColor.black.withAlphaComponent(0.7)
        statusLabel.layer.cornerRadius = 8
        statusLabel.clipsToBounds = true
        view.addSubview(statusLabel)
        // 3. Subscribe to status
        viewModel.$detectionStatus
            .sink { [weak self] status in
                self?.updateStatus(status)
            }
            .store(in: &cancellables)
        viewModel.$currentDetectionProgress
            .sink { [weak self] progress in
                // Only overwrite the label while detection is actually in progress
                guard let self = self,
                      case .processing = self.viewModel.detectionStatus else { return }
                self.statusLabel.text = "Detecting... \(progress)%"
            }
            .store(in: &cancellables)
    }

    override func viewDidLayoutSubviews() {
        super.viewDidLayoutSubviews()
        // Update layout
        if let previewLayer = view.layer.sublayers?.first as? AVCaptureVideoPreviewLayer {
            previewLayer.frame = view.bounds
        }
        statusLabel.frame = CGRect(
            x: 20,
            y: view.bounds.height - 100,
            width: view.bounds.width - 40,
            height: 60
        )
    }

    private func updateStatus(_ status: DetectionStatus) {
        switch status {
        case .noObject:
            statusLabel.text = "Frame your pet in the screen"
        case .processing:
            statusLabel.text = "Detecting..."
        case .detected:
            statusLabel.text = "Done!"
        case .failed(let reason):
            statusLabel.text = "Failed: \(reason)"
        }
    }
}

Key Points:
- Add AVCaptureVideoPreviewLayer directly to view.layer
- Subscribe to status with Combine's sink
- Use UIKit's standard layout methods
React Native / Flutter Integration
In cross-platform environments, pass the captureSession to the native view via a CaptureContext object:
// Native Module
import AVFoundation
import Combine
import React
import PetnowUI
import UIKit

@objc(PetnowCameraModule)
class PetnowCameraModule: RCTEventEmitter {
    private var viewModels: [String: CameraViewModel] = [:]
    private var cancellables: [String: Set<AnyCancellable>] = [:]

    // Declare the events emitted below so RCTEventEmitter accepts them
    override func supportedEvents() -> [String]! {
        ["onResult", "onStatusChange"]
    }

    @objc func initialize(
        _ species: String,
        apiKey: String,
        sessionId: String,
        resolver: @escaping RCTPromiseResolveBlock,
        rejecter: @escaping RCTPromiseRejectBlock
    ) {
        let contextId = UUID().uuidString
        let viewModel = CameraViewModel(
            species: species == "dog" ? .dog : .cat,
            cameraPurpose: .forRegisterFromProfile
        )
        Task {
            do {
                try await viewModel.initializeCamera(
                    licenseInfo: LicenseInfo(apiKey: apiKey, isDebugMode: true),
                    initialPosition: .back,
                    captureSessionId: sessionId
                ) { result in
                    self.sendEvent(withName: "onResult", body: ["contextId": contextId, "data": result])
                }
                // Send status to JS
                var cancellables = Set<AnyCancellable>()
                viewModel.$detectionStatus
                    .sink { [weak self] status in
                        self?.sendEvent(withName: "onStatusChange", body: ["contextId": contextId, "status": "\(status)"])
                    }
                    .store(in: &cancellables)
                self.viewModels[contextId] = viewModel
                self.cancellables[contextId] = cancellables
                // Return CaptureContext
                resolver([
                    "contextId": contextId,
                    "captureSession": viewModel.captureSession
                ])
            } catch {
                rejecter("INIT_ERROR", error.localizedDescription, error)
            }
        }
    }
}

// Native View Component
@objc(PetnowCameraView)
class PetnowCameraView: UIView {
    private var previewLayer: AVCaptureVideoPreviewLayer?

    @objc var captureContext: NSDictionary? {
        didSet {
            guard let session = captureContext?["captureSession"] as? AVCaptureSession else { return }
            previewLayer?.removeFromSuperlayer()
            let layer = AVCaptureVideoPreviewLayer(session: session)
            layer.frame = bounds
            layer.videoGravity = .resizeAspectFill
            self.layer.addSublayer(layer)
            self.previewLayer = layer
        }
    }

    override func layoutSubviews() {
        super.layoutSubviews()
        previewLayer?.frame = bounds
    }
}

// JavaScript usage example
import React, { useState, useEffect } from 'react';
import { NativeModules, requireNativeComponent } from 'react-native';

const PetnowCamera = requireNativeComponent('PetnowCameraView');
const { PetnowCameraModule } = NativeModules;

function CameraScreen() {
  const [context, setContext] = useState(null);

  useEffect(() => {
    // Create CaptureContext
    PetnowCameraModule.initialize('dog', 'your-api-key', 'session-123')
      .then(ctx => setContext(ctx));
  }, []);

  if (!context) return <LoadingView />;

  // Pass context to View
  return <PetnowCamera captureContext={context} style={{ flex: 1 }} />;
}

Key Points:
- initialize() returns a CaptureContext object (contextId + captureSession)
- The view receives the captureContext prop and automatically sets up the preview
- Events are matched to the corresponding ViewModel using contextId
Advanced Users Only
This method requires a deep understanding of AVFoundation, Combine, and UIKit/SwiftUI. Unless you are integrating cross-platform, use CameraView.
Next Steps
Once you've mastered customization, check out:
Recommended Learning Order
- Sound Guide - Change capture sounds
- Troubleshooting - Common issues and solutions
References
- Basic Usage - CameraView basic integration
- UI Module Overview - Component details