My Robotics Internship at the ISIR – Meet Sn.AI.l

Background

The Goal

My Role in Making Sn.AI.l

A very organized workspace
  1. Sending actions from the website to Python so the robot could respond.
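
To make that concrete, here is a minimal sketch of what such a request looks like from the client side, using Python's requests library against the /move route shown later in this post (the host and port are assumptions):

import requests

# Host and port are assumptions; the /move route matches the FastAPI backend shown below
resp = requests.post("http://localhost:8000/move", json={"action": "forward"})
print(resp.json())  # e.g. {"status": "ok", "received": "forward"}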

Electrical Design

Soft Shutdown System

The Solution

Designing The PCB

Power System

Protection and Smoothing

Control and GPIO Pin Shortages

UART Communication

Object Recognition

import onnxruntime as ort
import torch
from torchvision.models import mobilenet_v2

# Load YOLOv5n ONNX model and COCO class names
YOLOV5N_ONNX_PATH = 'yolov5n.onnx'
COCO_NAMES_PATH = 'coco.names'

with open(COCO_NAMES_PATH, 'r') as f:
    COCO_CLASSES = [line.strip() for line in f if line.strip()]

ort_session = ort.InferenceSession(YOLOV5N_ONNX_PATH)

# MobileNetV2, truncated after its feature extractor, produces a
# 1280-dimensional embedding for each image crop
mobilenet = mobilenet_v2(pretrained=True)
mobilenet.eval()
mobilenet_embed = torch.nn.Sequential(
    mobilenet.features,
    torch.nn.AdaptiveAvgPool2d((1, 1)),
    torch.nn.Flatten()
)
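
The snippet above loads the models but doesn't show an inference pass. Here is a minimal sketch of one YOLO forward pass, using the preprocess_yolo_image helper shown under Key Technical Challenges below; the input name ('images') and output layout follow the standard YOLOv5 ONNX export, so adjust them if your export differs:

import numpy as np

def run_yolo(frame_bgr):
    # Preprocess, run the ONNX session, and return the raw predictions
    img, r, (dw, dh), _ = preprocess_yolo_image(frame_bgr)
    blob = img[:, :, ::-1].transpose(2, 0, 1).astype(np.float32) / 255.0  # BGR->RGB, HWC->CHW, 0-1
    blob = np.ascontiguousarray(blob[None])  # add a batch dimension
    preds = ort_session.run(None, {'images': blob})[0]
    # Each row is [cx, cy, w, h, objectness, 80 class scores]; confidence
    # filtering and NMS turn these rows into the boxes fed to the custom detector
    return preds, r, (dw, dh)
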
# Custom object detection restricted to YOLO's proposed boxes
custom_dets_yolo = detect_custom_objects_yolo_boxes(
    frame_bgr, yolo_boxes_for_custom,
    object_names=requested_objects, threshold=0.6
)

# Fallback: sliding-window custom detection over the whole frame
custom_dets_sw = detect_custom_objects(
    frame_bgr, object_names=requested_objects, threshold=0.6
)
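
The two detect_custom_objects* helpers aren't shown here, but the matching step they rely on can be sketched as follows: each candidate crop is embedded with mobilenet_embed and compared to a stored reference embedding by cosine similarity. The helper names below (embed_crop, matches_reference) are illustrative; only the 0.6 threshold comes from the calls above:

import cv2
import torch.nn.functional as F

def embed_crop(crop_bgr):
    # Resize to MobileNetV2's input size and normalize with ImageNet statistics
    rgb = cv2.cvtColor(cv2.resize(crop_bgr, (224, 224)), cv2.COLOR_BGR2RGB)
    x = torch.from_numpy(rgb).permute(2, 0, 1).float() / 255.0
    mean = torch.tensor([0.485, 0.456, 0.406]).view(3, 1, 1)
    std = torch.tensor([0.229, 0.224, 0.225]).view(3, 1, 1)
    x = ((x - mean) / std).unsqueeze(0)
    with torch.no_grad():
        return mobilenet_embed(x)  # shape (1, 1280)

def matches_reference(crop_bgr, reference_embedding, threshold=0.6):
    # Cosine similarity between the crop and the stored reference object
    sim = F.cosine_similarity(embed_crop(crop_bgr), reference_embedding)
    return sim.item() >= threshold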

Key Technical Challenges

def preprocess_yolo_image(frame_bgr, img_size=640):
    # Letterbox: resize keeping the aspect ratio, then pad to img_size x img_size
    h0, w0 = frame_bgr.shape[:2]
    r = img_size / max(h0, w0)
    new_unpad = (int(round(w0 * r)), int(round(h0 * r)))
    img = cv2.resize(frame_bgr, new_unpad)
    dw, dh = img_size - new_unpad[0], img_size - new_unpad[1]
    img = cv2.copyMakeBorder(img, 0, dh, 0, dw, cv2.BORDER_CONSTANT, value=(114, 114, 114))
    return img, r, (dw, dh), (w0, h0)

def get_average_depth_in_box(box, frame_shape):
    # Average the valid (non-zero) depth readings inside a bounding box;
    # assumes a depth map aligned with the RGB frame, latest_depth_frame
    x0, y0, x1, y1 = box
    h, w = frame_shape[:2]
    roi = latest_depth_frame[max(0, y0):min(h, y1), max(0, x0):min(w, x1)]
    valid = roi[roi > 0]
    average_distance = float(valid.mean()) if valid.size else 0.0
    return average_distance
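
Putting detection and depth together gives the robot a usable distance estimate per object. A sketch of that combination; the det['box'] field, the metre units, and the 0.5 m stop threshold are illustrative assumptions, not values from the original code:

# Illustrative: stop when any matched object comes closer than ~0.5 m
# (assumes the depth map is in metres)
for det in custom_dets_yolo:
    distance = get_average_depth_in_box(det['box'], frame_bgr.shape)
    if 0 < distance < 0.5:
        move_robot_command('stop')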

Wrapping Up

Blockly Editor

Blockly.defineBlocksWithJsonArray([
  {
    "type": "when_flag_clicked",
    "message0": "when flag clicked",
    "message1": "%1",
    "args1": [{ "type": "input_statement", "name": "DO" }],
    "colour": 65
  },
  {
    "type": "object_detected",
    "message0": "object %1 detected?",
    "args0": [{ "type": "field_input", "name": "OBJECT", "text": "object_name" }],
    "output": "Boolean",
    "colour": 230
  }
  // ... more blocks
]);

Live Camera Feed

function checkCameraUsage() {
  const code = Blockly.JavaScript.workspaceToCode(blocklyWorkspace);
  const usesCamera = code.includes('isObjectDetected') ||
                     code.includes('getObjectDistance');
  if (usesCamera) {
    cameraContainer.style.display = 'flex';
    startCameraFeed();
    startDetectionTracking(code);
  }
}

function drawDetectionBoxes() {
  currentDetections.forEach(detection => {
    if (detection.box && detectedObjects.has(detection.class)) {
      const [x1, y1, x2, y2] = detection.box;
      const box = document.createElement('div');
      box.className = 'detection-box';
      box.style.left = (x1 / 640 * 100) + '%';
      box.style.top = (y1 / 360 * 100) + '%';
      // ... positioning logic
    }
  });
}

Command Tracking & Backend Communication

function addCommandDisplay(action, result, type = 'result') {
  const time = new Date().toLocaleTimeString();
  const resultClass = type === 'error' ? 'error' : 'result';  // CSS class follows the entry type
  const commandDiv = document.createElement('div');
  commandDiv.innerHTML = `
    <span class="time">${time}</span>
    <span class="action">${action}</span>
    <span class="${resultClass}">${result}</span>
  `;
  // ... append commandDiv to the command history container
}

@app.post("/move")

def move_robot(cmd: Command):

return move_robot_command(cmd.action)

@app.get("/api/camera/feed")

def get_camera_feed():

return StreamingResponse(mjpeg_stream(),

media_type="multipart/x-mixed-replace; boundary=frame")

@app.post("/api/object-recognition/detect")

async def detect_objects(request: Request):
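
The mjpeg_stream() generator used by the camera endpoint isn't shown in the snippet; a minimal sketch, assuming OpenCV JPEG-encodes the most recent camera frame (get_latest_frame is a hypothetical helper):

import cv2

def mjpeg_stream():
    # Yield JPEG-encoded frames in multipart format, one part per frame
    while True:
        frame = get_latest_frame()  # hypothetical: most recent BGR frame
        ok, jpeg = cv2.imencode('.jpg', frame)
        if not ok:
            continue
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + jpeg.tobytes() + b'\r\n')
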
# Helper used by the /move endpoint: forwards the action to the Arduino over UART
def move_robot_command(action: str):
    if ser and ser.is_open:
        command_to_send_to_arduino = f"{action}\n"  # one newline-terminated command per action
        ser.write(command_to_send_to_arduino.encode('utf-8'))
    return {"status": "ok", "received": action}

Script Management

function saveScript() {
  if (currentMode === 'blockly') {
    const xml = Blockly.Xml.workspaceToDom(blocklyWorkspace);
    const xmlText = Blockly.Xml.domToPrettyText(xml);
    downloadFile('blockly_script.xml', xmlText, 'application/xml');
  }
}

Wrapping Up

Jetson Orin Nano Setup

SLAM

How It Works

Wrapping Up SLAM

Assembly

PCB Soldering

Full Electrical Assembly

The Final Result

Isn’t she beautiful?

Conclusion