-->
Have you ever struggled to find that one photo among thousands in your gallery? Or wished your photo app could automatically organize images by content, people, or locations? Today, we’ll build a smart photo gallery that does exactly that using DeepRequest’s AI Vision APIs.
In this comprehensive tutorial, you’ll learn how to create a web application that can:
Our smart photo gallery will feature:
Before we start, make sure you have:
mkdir smart-photo-gallery
cd smart-photo-gallery
npm init -y
npm install express multer axios dotenv cors
npm install --save-dev nodemon
smart-photo-gallery/
├── public/
│ ├── index.html
│ ├── style.css
│ └── script.js
├── uploads/
├── server.js
├── .env
└── package.json
Create a .env file:
RAPIDAPI_KEY=your_rapidapi_key_here
DEEPREQUEST_HOST=deeprequest.rapidapi.com
PORT=3000
server.js
)const express = require('express');
const multer = require('multer');
const axios = require('axios');
const path = require('path');
const fs = require('fs');
require('dotenv').config();
const app = express();
const PORT = process.env.PORT || 3000;
// Middleware
app.use(express.static('public'));
app.use(express.json());
app.use(require('cors')());
// Configure multer for file uploads
const storage = multer.diskStorage({
destination: 'uploads/',
filename: (req, file, cb) => {
cb(null, Date.now() + '-' + file.originalname);
}
});
const upload = multer({ storage });
// DeepRequest API configuration
const API_CONFIG = {
headers: {
'X-RapidAPI-Key': process.env.RAPIDAPI_KEY,
'X-RapidAPI-Host': process.env.DEEPREQUEST_HOST,
'Content-Type': 'multipart/form-data'
}
};
// API endpoints for different vision services
const ENDPOINTS = {
objectDetection: 'https://rapidapi.com/organization/deeprequest/object-detection',
faceDetection: 'https://rapidapi.com/organization/deeprequest/face-detection',
textRecognition: 'https://rapidapi.com/organization/deeprequest/ocr',
brandDetection: 'https://rapidapi.com/organization/deeprequest/brand-detection',
qualityAssessment: 'https://rapidapi.com/organization/deeprequest/quality-assessment'
};
// Analyze image with multiple AI services
// Analyze an image with all five AI vision services in parallel.
// Promise.allSettled is used so that one failing service degrades to a
// sensible default instead of failing the whole analysis.
//
// @param {string} imagePath - path to the uploaded image on disk
// @returns {Promise<{objects: Array, faces: Array, text: string, brands: Array, quality: {score: number}}>}
// @throws if the image file cannot be read
async function analyzeImage(imagePath) {
  // Non-blocking read; the buffer is shared (read-only) by all requests.
  const imageBuffer = await fs.promises.readFile(imagePath);

  // Build a FRESH FormData per request: Node's built-in FormData expects a
  // Blob (a raw Buffer would be coerced to a string), and sharing one form
  // instance across five concurrent requests is unsafe.
  const buildForm = () => {
    const form = new FormData();
    form.append('image', new Blob([imageBuffer]), path.basename(imagePath));
    return form;
  };

  try {
    // Run all analyses in parallel for better performance
    const [objects, faces, text, brands, quality] = await Promise.allSettled([
      analyzeObjects(buildForm()),
      analyzeFaces(buildForm()),
      recognizeText(buildForm()),
      detectBrands(buildForm()),
      assessQuality(buildForm())
    ]);
    return {
      objects: objects.status === 'fulfilled' ? objects.value : [],
      faces: faces.status === 'fulfilled' ? faces.value : [],
      text: text.status === 'fulfilled' ? text.value : '',
      brands: brands.status === 'fulfilled' ? brands.value : [],
      quality: quality.status === 'fulfilled' ? quality.value : { score: 0.5 }
    };
  } catch (error) {
    // allSettled never rejects, so this only fires on unexpected errors.
    console.error('Analysis error:', error);
    throw error;
  }
}
// Individual analysis functions
// Detect objects in the image; resolves to an array of detections (or []).
async function analyzeObjects(formData) {
  const { data } = await axios.post(ENDPOINTS.objectDetection, formData, API_CONFIG);
  return data.detections || [];
}
// Detect faces in the image; resolves to an array of face records (or []).
async function analyzeFaces(formData) {
  const { data } = await axios.post(ENDPOINTS.faceDetection, formData, API_CONFIG);
  return data.faces || [];
}
// Run OCR on the image; resolves to the extracted text (or '').
async function recognizeText(formData) {
  const { data } = await axios.post(ENDPOINTS.textRecognition, formData, API_CONFIG);
  return data.text || '';
}
// Detect brand logos in the image; resolves to an array of brands (or []).
async function detectBrands(formData) {
  const { data } = await axios.post(ENDPOINTS.brandDetection, formData, API_CONFIG);
  return data.brands || [];
}
// Score the technical quality of the image; falls back to a neutral 0.5.
async function assessQuality(formData) {
  const { data } = await axios.post(ENDPOINTS.qualityAssessment, formData, API_CONFIG);
  return data.quality || { score: 0.5 };
}
// Routes
// POST /upload — accept a single photo, run the AI analysis pipeline,
// persist the resulting metadata, and return it to the client.
app.post('/upload', upload.single('photo'), async (req, res) => {
  if (!req.file) {
    return res.status(400).json({ error: 'No file uploaded' });
  }
  try {
    const analysis = await analyzeImage(req.file.path);
    const { filename, originalname, path: filePath, size } = req.file;
    // Assemble the photo metadata record.
    const photoData = {
      id: Date.now(),
      filename,
      originalName: originalname,
      path: filePath,
      size,
      uploadDate: new Date().toISOString(),
      analysis,
      tags: generateTags(analysis)
    };
    // Persist to our demo file-based "database".
    savePhotoMetadata(photoData);
    res.json({ success: true, photo: photoData });
  } catch (error) {
    console.error('Upload error:', error);
    res.status(500).json({ error: 'Analysis failed' });
  }
});
// Generate searchable tags from analysis
// Derive a de-duplicated list of searchable tags from an analysis result.
// Covers detected objects, people, brands, quality buckets and text presence.
function generateTags(analysis) {
  const tags = new Set();
  // Object labels, lowercased, above a 0.7 confidence threshold.
  for (const obj of analysis.objects) {
    if (obj.confidence > 0.7) {
      tags.add(obj.class.toLowerCase());
    }
  }
  // People-related tags, including a count tag like "3-people".
  const faceCount = analysis.faces.length;
  if (faceCount > 0) {
    tags.add('people');
    tags.add(`${faceCount}-people`);
  }
  // Brand names, lowercased, above a stricter 0.8 threshold.
  for (const brand of analysis.brands) {
    if (brand.confidence > 0.8) {
      tags.add(brand.name.toLowerCase());
    }
  }
  // Bucket quality into high (>0.8) / low (<0.3); mid-range gets no tag.
  const { score } = analysis.quality;
  if (score > 0.8) {
    tags.add('high-quality');
  } else if (score < 0.3) {
    tags.add('low-quality');
  }
  // Flag images that carry a meaningful amount of recognized text.
  if (analysis.text && analysis.text.length > 10) {
    tags.add('contains-text');
  }
  return [...tags];
}
// Simple file-based storage for demo
// Append a photo record to the demo JSON "database" on disk.
// Reads the existing array (if any), pushes the record, writes it back.
function savePhotoMetadata(photoData) {
  const dbPath = 'photo-database.json';
  const records = fs.existsSync(dbPath)
    ? JSON.parse(fs.readFileSync(dbPath, 'utf8'))
    : [];
  records.push(photoData);
  fs.writeFileSync(dbPath, JSON.stringify(records, null, 2));
}
// Get all photos
// GET /photos — return every stored photo record (empty list if no DB yet).
app.get('/photos', (req, res) => {
  const dbPath = 'photo-database.json';
  const photos = fs.existsSync(dbPath)
    ? JSON.parse(fs.readFileSync(dbPath, 'utf8'))
    : [];
  res.json(photos);
});
// Search photos
// GET /search?q=term — match photos whose tags or recognized text contain
// the query (case-insensitive substring match).
// Fix: the original called q.toLowerCase() without checking that q exists,
// so GET /search with no query string threw a TypeError and returned a 500.
// A missing/empty query now behaves like the original empty-string query
// (every tag includes ''), i.e. it returns all photos.
app.get('/search', (req, res) => {
  // req.query.q may be undefined, a string, or an array (?q=a&q=b) — coerce
  // defensively before lowercasing.
  const query = String(req.query.q ?? '').toLowerCase();
  const dbPath = 'photo-database.json';
  if (!fs.existsSync(dbPath)) {
    return res.json([]);
  }
  const database = JSON.parse(fs.readFileSync(dbPath, 'utf8'));
  const results = database.filter(photo =>
    photo.tags.some(tag => tag.includes(query)) ||
    photo.analysis.text.toLowerCase().includes(query)
  );
  res.json(results);
});
// Serve uploaded images
app.use('/uploads', express.static('uploads'));
app.listen(PORT, () => {
console.log(`Smart Photo Gallery running on http://localhost:${PORT}`);
});
public/index.html
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>Smart Photo Gallery - AI Powered</title>
  <link rel="stylesheet" href="style.css">
</head>
<body>
  <div class="container">
    <header>
      <h1>🤖 Smart Photo Gallery</h1>
      <p>Powered by DeepRequest AI Vision APIs</p>
    </header>

    <!-- Drag-and-drop / click-to-browse upload zone -->
    <div class="upload-section">
      <div class="upload-zone" id="uploadZone">
        <div class="upload-content">
          <svg class="upload-icon" viewBox="0 0 24 24">
            <path d="M14,2H6A2,2 0 0,0 4,4V20A2,2 0 0,0 6,22H18A2,2 0 0,0 20,20V8L14,2M18,20H6V4H13V9H18V20Z" />
          </svg>
          <h3>Drop photos here or click to upload</h3>
          <p>AI will automatically analyze and tag your images</p>
        </div>
        <input type="file" id="fileInput" multiple accept="image/*" hidden>
      </div>
      <!-- Progress bar shown by script.js while uploads are being analyzed -->
      <div class="progress-section" id="progressSection" style="display: none;">
        <div class="progress-bar">
          <div class="progress-fill" id="progressFill"></div>
        </div>
        <p id="progressText">Analyzing images with AI...</p>
      </div>
    </div>

    <!-- Free-text search plus one-click tag filters -->
    <div class="search-section">
      <div class="search-bar">
        <input type="text" id="searchInput" placeholder="Search by objects, people, text, brands...">
        <button id="searchBtn">🔍 Search</button>
      </div>
      <div class="quick-filters">
        <button class="filter-btn" data-filter="people">👥 People</button>
        <button class="filter-btn" data-filter="high-quality">✨ High Quality</button>
        <button class="filter-btn" data-filter="contains-text">📄 Contains Text</button>
        <button class="filter-btn" data-filter="car">🚗 Cars</button>
        <button class="filter-btn" data-filter="food">🍕 Food</button>
      </div>
    </div>

    <div class="gallery-section">
      <div class="stats" id="stats">
        <span id="photoCount">0 photos</span>
        <span id="analysisStatus"></span>
      </div>
      <div class="photo-grid" id="photoGrid">
        <!-- Photos will be dynamically inserted here -->
      </div>
    </div>

    <!-- Detail modal populated by script.js when a photo is clicked -->
    <div class="photo-modal" id="photoModal" style="display: none;">
      <div class="modal-content">
        <span class="close-btn" id="closeModal">×</span>
        <img id="modalImage" src="" alt="">
        <div class="photo-info">
          <h3 id="modalTitle"></h3>
          <div class="analysis-results" id="analysisResults"></div>
        </div>
      </div>
    </div>
  </div>
  <script src="script.js"></script>
</body>
</html>
public/script.js
// Front-end controller for the smart gallery: handles uploads, search,
// quick filters, grid rendering, and the photo detail modal.
// Fixes vs. the original: removed the stray ')' before the class keyword
// (a SyntaxError), and all server-derived strings (filenames, OCR text,
// object/brand labels, tags) are now HTML-escaped before being interpolated
// into innerHTML templates, closing a stored-XSS hole.
class SmartPhotoGallery {
  constructor() {
    this.photos = [];
    this.initializeEventListeners();
    this.loadPhotos();
  }

  // Escape untrusted text for safe interpolation into innerHTML templates
  // (both element content and double-quoted attribute values).
  escapeHtml(value) {
    return String(value)
      .replace(/&/g, '&amp;')
      .replace(/</g, '&lt;')
      .replace(/>/g, '&gt;')
      .replace(/"/g, '&quot;')
      .replace(/'/g, '&#39;');
  }

  // Wire up the upload zone, search box, quick filters, and modal close.
  initializeEventListeners() {
    const uploadZone = document.getElementById('uploadZone');
    const fileInput = document.getElementById('fileInput');
    const searchInput = document.getElementById('searchInput');
    const searchBtn = document.getElementById('searchBtn');
    // Upload zone events
    uploadZone.addEventListener('click', () => fileInput.click());
    uploadZone.addEventListener('dragover', this.handleDragOver.bind(this));
    uploadZone.addEventListener('drop', this.handleDrop.bind(this));
    fileInput.addEventListener('change', this.handleFileSelect.bind(this));
    // Search events
    searchBtn.addEventListener('click', this.performSearch.bind(this));
    searchInput.addEventListener('keypress', (e) => {
      if (e.key === 'Enter') this.performSearch();
    });
    // Quick filter events
    document.querySelectorAll('.filter-btn').forEach(btn => {
      btn.addEventListener('click', (e) => {
        const filter = e.currentTarget.dataset.filter;
        this.filterPhotos(filter);
      });
    });
    // Modal events
    document.getElementById('closeModal').addEventListener('click', this.closeModal.bind(this));
  }

  // Fetch all stored photos from the server and render them.
  async loadPhotos() {
    try {
      const response = await fetch('/photos');
      this.photos = await response.json();
      this.renderPhotos(this.photos);
      this.updateStats();
    } catch (error) {
      console.error('Error loading photos:', error);
    }
  }

  handleDragOver(e) {
    e.preventDefault();
    e.currentTarget.classList.add('drag-over');
  }

  handleDrop(e) {
    e.preventDefault();
    e.currentTarget.classList.remove('drag-over');
    // Only accept image files from the drop payload.
    const files = Array.from(e.dataTransfer.files).filter(file =>
      file.type.startsWith('image/')
    );
    this.uploadFiles(files);
  }

  handleFileSelect(e) {
    const files = Array.from(e.target.files);
    this.uploadFiles(files);
  }

  // Upload files one at a time so per-file progress can be reported.
  async uploadFiles(files) {
    if (files.length === 0) return;
    const progressSection = document.getElementById('progressSection');
    const progressFill = document.getElementById('progressFill');
    const progressText = document.getElementById('progressText');
    progressSection.style.display = 'block';
    let completed = 0;
    for (const file of files) {
      try {
        const formData = new FormData();
        formData.append('photo', file);
        progressText.textContent = `Analyzing ${file.name}...`;
        const response = await fetch('/upload', {
          method: 'POST',
          body: formData
        });
        const result = await response.json();
        if (result.success) {
          // Newest photos go first, both in memory and in the grid.
          this.photos.unshift(result.photo);
          this.addPhotoToGrid(result.photo);
        }
        completed++;
        const progress = (completed / files.length) * 100;
        progressFill.style.width = `${progress}%`;
      } catch (error) {
        console.error('Upload error:', error);
      }
    }
    progressSection.style.display = 'none';
    this.updateStats();
  }

  // Query the server; an empty search box resets to the full gallery.
  async performSearch() {
    const query = document.getElementById('searchInput').value.trim();
    if (!query) {
      this.renderPhotos(this.photos);
      return;
    }
    try {
      const response = await fetch(`/search?q=${encodeURIComponent(query)}`);
      const results = await response.json();
      this.renderPhotos(results);
    } catch (error) {
      console.error('Search error:', error);
    }
  }

  // Client-side filter by exact tag (used by the quick-filter buttons).
  filterPhotos(filter) {
    const filtered = this.photos.filter(photo =>
      photo.tags.includes(filter)
    );
    this.renderPhotos(filtered);
  }

  renderPhotos(photos) {
    const grid = document.getElementById('photoGrid');
    grid.innerHTML = '';
    photos.forEach(photo => this.addPhotoToGrid(photo));
  }

  // Build a grid tile for one photo and insert it at the front of the grid.
  addPhotoToGrid(photo) {
    const grid = document.getElementById('photoGrid');
    const photoElement = document.createElement('div');
    photoElement.className = 'photo-item';
    photoElement.onclick = () => this.openModal(photo);
    const qualityClass = photo.analysis.quality.score > 0.7 ? 'high-quality' :
      photo.analysis.quality.score < 0.3 ? 'low-quality' : '';
    // All server-derived strings are escaped before hitting innerHTML.
    photoElement.innerHTML = `
      <img src="/${this.escapeHtml(photo.path)}" alt="${this.escapeHtml(photo.originalName)}" loading="lazy">
      <div class="photo-overlay ${qualityClass}">
        <div class="photo-tags">
          ${photo.tags.slice(0, 3).map(tag => `<span class="tag">${this.escapeHtml(tag)}</span>`).join('')}
        </div>
        <div class="photo-stats">
          ${photo.analysis.faces.length > 0 ? `👥 ${photo.analysis.faces.length}` : ''}
          ${photo.analysis.text ? '📄' : ''}
          ${photo.analysis.brands.length > 0 ? '🏷️' : ''}
        </div>
      </div>
    `;
    grid.insertBefore(photoElement, grid.firstChild);
  }

  // Populate and show the detail modal for one photo.
  openModal(photo) {
    const modal = document.getElementById('photoModal');
    const modalImage = document.getElementById('modalImage');
    const modalTitle = document.getElementById('modalTitle');
    const analysisResults = document.getElementById('analysisResults');
    modalImage.src = `/${photo.path}`;
    modalTitle.textContent = photo.originalName;
    // Display analysis results; every label/text value is escaped.
    analysisResults.innerHTML = `
      <div class="analysis-section">
        <h4>🔍 Objects Detected</h4>
        <div class="tags-list">
          ${photo.analysis.objects.map(obj =>
            `<span class="analysis-tag">${this.escapeHtml(obj.class)} (${Math.round(obj.confidence * 100)}%)</span>`
          ).join('')}
        </div>
      </div>
      ${photo.analysis.faces.length > 0 ? `
        <div class="analysis-section">
          <h4>👥 People (${photo.analysis.faces.length})</h4>
          <p>Face detection completed</p>
        </div>
      ` : ''}
      ${photo.analysis.text ? `
        <div class="analysis-section">
          <h4>📄 Text Recognized</h4>
          <p class="extracted-text">${this.escapeHtml(photo.analysis.text)}</p>
        </div>
      ` : ''}
      ${photo.analysis.brands.length > 0 ? `
        <div class="analysis-section">
          <h4>🏷️ Brands Detected</h4>
          <div class="tags-list">
            ${photo.analysis.brands.map(brand =>
              `<span class="analysis-tag">${this.escapeHtml(brand.name)} (${Math.round(brand.confidence * 100)}%)</span>`
            ).join('')}
          </div>
        </div>
      ` : ''}
      <div class="analysis-section">
        <h4>✨ Quality Score</h4>
        <div class="quality-meter">
          <div class="quality-bar" style="width: ${photo.analysis.quality.score * 100}%"></div>
        </div>
        <p>${Math.round(photo.analysis.quality.score * 100)}% quality</p>
      </div>
    `;
    modal.style.display = 'flex';
  }

  closeModal() {
    document.getElementById('photoModal').style.display = 'none';
  }

  updateStats() {
    const photoCount = document.getElementById('photoCount');
    photoCount.textContent = `${this.photos.length} photos`;
  }
}
Each uploaded image is analyzed using multiple DeepRequest APIs simultaneously for comprehensive understanding.
Our system generates searchable tags based on:
Users can search by:
Automatically assess and filter photos based on technical quality metrics like blur, lighting, and resolution.
Implement proper rate limiting to avoid exceeding API quotas:
// Throttle the analysis endpoint so a single client cannot burn through the
// RapidAPI quota: at most 60 uploads per IP per minute.
const rateLimit = require('express-rate-limit');
const apiLimiter = rateLimit({
windowMs: 60 * 1000, // 1 minute
max: 60, // limit each IP to 60 requests per windowMs
message: 'Too many analysis requests, please try again later.'
});
// Apply only to /upload — static assets and reads stay unthrottled.
app.use('/upload', apiLimiter);
Always implement robust error handling for API calls:
// Run an async API call, logging any failure and returning a fallback value
// instead of letting the error propagate.
// @param {Function} apiFunction - zero-argument (async) function to invoke
// @param {*} [fallbackValue=null] - value returned when the call fails
async function safeApiCall(apiFunction, fallbackValue = null) {
  let result = fallbackValue;
  try {
    result = await apiFunction();
  } catch (err) {
    console.error('API call failed:', err);
  }
  return result;
}
Congratulations! You’ve built a fully functional smart photo gallery that leverages the power of AI to automatically understand, organize, and search through your images. This project demonstrates how DeepRequest’s AI Vision APIs can transform a simple photo storage app into an intelligent content management system.
The techniques you’ve learned here can be applied to many other projects:
Ready to build something amazing? Get your API key and start creating intelligent applications today!
Questions about this tutorial? Want to share what you’ve built? Reach out to us at developers@deeprequest.io or join our community discussions.