-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathindex.html
More file actions
343 lines (292 loc) · 14.9 KB
/
index.html
File metadata and controls
343 lines (292 loc) · 14.9 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="utf-8">
  <!-- Meta tags for social media banners; fill these in appropriately — they are your "business card" -->
  <!-- Replace each content attribute with appropriate information -->
  <meta name="description" content="DESCRIPTION META TAG">
  <meta property="og:title" content="SOCIAL MEDIA TITLE TAG">
  <meta property="og:description" content="SOCIAL MEDIA DESCRIPTION TAG TAG">
  <meta property="og:url" content="URL OF THE WEBSITE">
  <!-- Path to banner image; optimal dimensions are 1200x630 -->
  <!-- NOTE(review): was "static/image/…" — every other asset on this page lives
       under "static/images/"; assuming the singular form was a typo. Confirm
       the file's actual location before publishing. -->
  <meta property="og:image" content="static/images/your_banner_image.png">
  <meta property="og:image:width" content="1200">
  <meta property="og:image:height" content="630">
  <meta name="twitter:title" content="TWITTER BANNER TITLE META TAG">
  <meta name="twitter:description" content="TWITTER BANNER DESCRIPTION META TAG">
  <!-- Path to banner image; optimal dimensions are 1200x600 -->
  <meta name="twitter:image" content="static/images/your_twitter_banner_image.png">
  <meta name="twitter:card" content="summary_large_image">
  <!-- Keywords for your paper to be indexed by -->
  <meta name="keywords" content="KEYWORDS SHOULD BE PLACED HERE">
  <meta name="viewport" content="width=device-width, initial-scale=1">

  <title>GET-3D</title>
  <link rel="icon" type="image/x-icon" href="static/images/robot-arm.ico">
  <link href="https://fonts.googleapis.com/css?family=Google+Sans|Noto+Sans|Castoro" rel="stylesheet">

  <link rel="stylesheet" href="static/css/bulma.min.css">
  <link rel="stylesheet" href="static/css/bulma-carousel.min.css">
  <link rel="stylesheet" href="static/css/bulma-slider.min.css">
  <link rel="stylesheet" href="static/css/fontawesome.all.min.css">
  <link rel="stylesheet" href="https://cdn.jsdelivr.net/gh/jpswalsh/academicons@1/css/academicons.min.css">
  <link rel="stylesheet" href="static/css/index.css">

  <!-- Script order preserved deliberately: jQuery must load before the Bulma plugins below. -->
  <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js"></script>
  <script src="https://documentcloud.adobe.com/view-sdk/main.js"></script>
  <script defer src="static/js/fontawesome.all.min.js"></script>
  <script src="static/js/bulma-carousel.min.js"></script>
  <script src="static/js/bulma-slider.min.js"></script>
  <script src="static/js/index.js"></script>
</head>
<body>
<section class="hero">
  <div class="hero-body">
    <div class="container is-max-desktop">
      <div class="columns is-centered">
        <div class="column has-text-centered">
          <h1 class="title is-1 publication-title">GET-3D</h1>
          <!-- Paper authors (disabled).
               NOTE(review): the previous markup nested HTML comments, which is
               invalid — the inner "-->"s terminated the outer comment early and
               accidentally commented out the affiliation block and the opening
               publication-links wrapper below. Rewritten as flat comments. -->
          <!--
          <div class="is-size-5 publication-authors">
            <span class="author-block">
              <a href="https://scholar.google.com/citations?user=fJzDsBcAAAAJ&hl=en" target="_blank">Prashant Gaikwat</a>,</span>
            <span class="author-block">
              <a href="https://scholar.google.co.in/citations?user=dRjWRJcAAAAJ&hl=en" target="_blank">Abhishek Mukhopadhyay</a>,</span>
            <span class="author-block">
              <a href="https://scholar.google.com/citations?user=4-55tyYAAAAJ&hl=en" target="_blank">Anujith Muraleedharan</a>,</span>
            <span class="author-block">
              <a href="https://scholar.google.com/citations?user=_UoINqgAAAAJ&hl=en" target="_blank">Mukund Mitra</a>,</span>
            <span class="author-block">
              <a href="https://scholar.google.co.in/citations?user=Pyj57Y4AAAAJ&hl=en" target="_blank">Pradipta Biswas</a>
            </span>
          </div>
          -->
          <div class="is-size-5 publication-authors">
            <span class="author-block">Indian Institute of Science, Bengaluru<br>AVIATION Journal Vol 27 No 4 (2023)</span>
          </div>
          <div class="column has-text-centered">
            <div class="publication-links">
              <!-- Arxiv PDF link (disabled) -->
              <!--
              <span class="link-block">
                <a href="https://doi.org/10.3846/aviation.2023.20588" target="_blank"
                   class="external-link button is-normal is-rounded is-dark">
                  <span class="icon">
                    <i class="fas fa-file-pdf"></i>
                  </span>
                  <span>Paper</span>
                </a>
              </span>
              -->
              <!-- Supplementary PDF link (disabled) -->
              <!--
              <span class="link-block">
                <a href="static/pdfs/supplementary_material.pdf" target="_blank"
                   class="external-link button is-normal is-rounded is-dark">
                  <span class="icon">
                    <i class="fas fa-file-pdf"></i>
                  </span>
                  <span>Supplementary</span>
                </a>
              </span>
              -->
              <!-- Github link -->
              <span class="link-block">
                <a href="https://github.com/AnujithM/Get-3D.github.io" target="_blank" rel="noopener noreferrer"
                   class="external-link button is-normal is-rounded is-dark">
                  <span class="icon">
                    <i class="fab fa-github"></i>
                  </span>
                  <span>Code</span>
                </a>
              </span>
              <!-- ArXiv abstract link (disabled) -->
              <!--
              <span class="link-block">
                <a href="https://arxiv.org/abs/ARXIV-PAPER-ID" target="_blank"
                   class="external-link button is-normal is-rounded is-dark">
                  <span class="icon">
                    <i class="ai ai-arxiv"></i>
                  </span>
                  <span>arXiv</span>
                </a>
              </span>
              -->
            </div>
          </div>
        </div>
      </div>
    </div>
  </div>
</section>
<!-- Teaser video -->
<section class="hero teaser">
  <div class="container is-max-desktop">
    <div class="hero-body">
      <!-- autoplay is allowed by browsers only because the video is muted.
           height="100%" is not a valid attribute value (the attribute takes
           integer pixels), so the sizing moved to an inline style. -->
      <video id="tree" poster="" autoplay controls muted loop playsinline style="height: 100%;">
        <source src="static/videos/clip.mp4" type="video/mp4">
        Your browser does not support the video tag.
      </video>
    </div>
  </div>
</section>
<!-- End teaser video -->
<!-- Paper abstract -->
<section class="section hero is-light">
  <div class="container is-max-desktop">
    <div class="columns is-centered has-text-centered">
      <div class="column is-four-fifths">
        <h2 class="title is-3">Overview</h2>
        <!-- Bulma's has-text-justified on this wrapper already justifies the
             text, so the paragraph needs no inline style. -->
        <div class="content has-text-justified">
          <p>
            This project focuses on enhancing 2D videos to create a 3D-like immersive experience using a combination of deep learning-based instance segmentation and depth estimation techniques. By leveraging the YOLOv8 instance segmentation model and MiDaS depth estimation model, the system accurately identifies foreground objects and estimates their spatial depth. The goal is to apply dynamic depth-based effects that amplify the perception of depth, making the video content appear more engaging and visually striking without the need for specialized 3D glasses.
          </p>
        </div>
      </div>
    </div>
  </div>
</section>
<!-- End paper abstract -->
<!-- Image carousel (disabled). Rewritten as a single comment: HTML comments
     cannot be nested, and the previous inner comment markers fragmented this
     block into several comments with stray live markup between them. -->
<!--
<section class="hero is-small">
  <div class="hero-body">
    <div class="container">
      <h2 class="title is-3 has-text-centered">Overview</h2>
      <div class="image-container has-text-centered">
        <div id="results-carousel" class="carousel results-carousel">
          <div class="item">
            <img src="static/images/Proposed 1.png" alt="Schematic diagram of the proposed system" width="1024" height="968">
            <h2 class="subtitle has-text-centered">
              Schematic diagram of Proposed System
            </h2>
          </div>
          <div class="item">
            <img src="static/images/Setup1.png" alt="Setup of the taxiway in the laboratory space">
            <h2 class="subtitle has-text-centered">
              Setup of Taxiway in the laboratory space.
            </h2>
          </div>
        </div>
      </div>
    </div>
  </div>
</section>
-->
<!-- End image carousel -->
<!-- Single Video Section -->
<section class="hero is-small">
  <div class="hero-body">
    <div class="container">
      <h2 class="title is-3 has-text-centered">Methodology</h2>
      <div class="video-container has-text-centered">
        <!-- Looping, muted, inline playback; autoplay is permitted because the video is muted. -->
        <video width="1024" height="768" autoplay loop muted playsinline>
          <source src="static/videos/Depth.mp4" type="video/mp4">
          Your browser does not support the video tag.
        </video>
        <!-- A <p> is not valid inside a heading element (<h2> accepts phrasing
             content only), so the former <h2 class="subtitle"> wrapper is now a
             <div> carrying the same Bulma classes. -->
        <div class="subtitle has-text-justified">
          <p>
            The methodology involves a multi-stage pipeline that integrates object segmentation, depth estimation, and depth-based effect application. Initially, the YOLOv8 instance segmentation model detects and segments objects of interest in each video frame. Simultaneously, the MiDaS model estimates the depth map of the scene, providing pixel-wise depth information. The segmented objects are then analyzed based on their depth and area to determine their eligibility for the 3D enhancement effect. Objects meeting specific depth thresholds are isolated, and zoom effects are applied to simulate depth perception.
          </p>
          <p>
            To further accentuate the 3D illusion, black vertical bars are added to the background, along with masked horizontal sections at the top and bottom of the frame. These bars serve two purposes: they create a frame-like boundary that enhances the viewer's focus on the foreground objects, simulating a window effect commonly seen in stereoscopic displays, and they reduce peripheral distractions, making the depth cues more prominent. Additionally, geometric transformations, such as resizing and perspective adjustments, are utilized to enhance the illusion of depth. The processed frames are compiled to generate a final video that presents an autostereoscopic 3D-like effect.
          </p>
        </div>
      </div>
    </div>
  </div>
</section>
<!-- End Single Video Section -->
<!-- Single Image Section: Results (disabled). Rewritten as one comment —
     the previous markup nested comments, which is invalid HTML. -->
<!--
<section class="hero is-small">
  <div class="hero-body">
    <div class="container">
      <h2 class="title is-3 has-text-centered">Results</h2>
      <div class="image-container has-text-centered">
        <img src="static/images/Working.png" alt="System results overview" width="1024" height="768">
        <div class="subtitle has-text-justified">
          <p>
            This work presents an end-to-end solution using state-of-the-art components for real-world aircraft navigation, contrasting with existing systems that operate in simulated environments. Traditionally, manual inspection is needed after landing before an aircraft follows the taxiway. Our approach integrates lane and object detection algorithms to automate this process, ensuring collision avoidance and precise stopping. The navigation algorithm controls steering and halts the aircraft if the taxiway ends or the lane is undetected.
          </p>
        </div>
      </div>
    </div>
  </div>
</section>
-->
<!-- End Single Image Section -->
<!-- Single Image Section: Additional Results (disabled). Rewritten as one
     comment — the previous markup nested comments, which is invalid HTML. -->
<!--
<section class="hero is-small">
  <div class="hero-body">
    <div class="container">
      <h2 class="title is-3 has-text-centered">Additional Results</h2>
      <div class="image-container has-text-centered">
        <img src="static/images/Controller.png" alt="Controller comparison results" width="1024" height="968">
        <div class="subtitle has-text-justified">
          <p>
            The primary objective revolved around comparing the controllers' ability to effectively correct the Turtlebot's deviation from the central lane. To achieve this, we designed three distinct paths – left, right, and central – and systematically examined the performance of all four controllers across these paths. The findings indicated that the Linear Quadratic Regulator (LQR) exhibited the highest accuracy, with a mean error of 0.26 cm and a standard deviation of 0.94.
          </p>
        </div>
      </div>
    </div>
  </div>
</section>
-->
<!-- End Single Image Section -->
<!-- Youtube video (disabled). Rewritten as one comment — the previous markup
     nested comments ("Paper video." terminated the outer comment early),
     which is invalid HTML. -->
<!--
<section class="hero is-small is-light">
  <div class="hero-body">
    <div class="container">
      <h2 class="title is-3">Adaptive Stamp Localization</h2>
      <div class="columns is-centered has-text-centered">
        <div class="column is-four-fifths">
          <div class="publication-video">
            <iframe src="https://www.youtube.com/embed/JkaxUblCGz0" title="Adaptive Stamp Localization video" frameborder="0" allow="autoplay; encrypted-media" allowfullscreen></iframe>
          </div>
        </div>
      </div>
    </div>
  </div>
</section>
-->
<!-- End youtube video -->
<!-- Paper poster (disabled). The iframe now carries a title attribute so it
     is accessible if this section is re-enabled. -->
<!--
<section class="hero is-small is-light">
  <div class="hero-body">
    <div class="container">
      <h2 class="title">Poster</h2>
      <iframe src="static/pdfs/IUI2024POSTER.pdf" title="Paper poster PDF" width="100%" height="550">
      </iframe>
    </div>
  </div>
</section>
-->
<!--End paper poster -->
<!--BibTex citation -->
<!-- <section class="section" id="BibTeX">
<div class="container is-max-desktop content">
<h2 class="title">BibTeX</h2>
<pre><code>@article{Gaikwad2023,
title={Developing a computer vision based system for autonomous taxiing of aircraft},
author={Gaikwad, P. and Mukhopadhyay, A. and Muraleedharan, A. and Mitra, M. and Biswas, P.},
journal={Aviation},
volume={27},
number={4},
pages={248--258},
year={2023},
month={Dec},
doi={10.3846/aviation.2023.20588}
}</code></pre>
</div>
</section>-->
<!--End BibTex citation -->
<footer class="footer">
  <div class="container">
    <div class="columns is-centered">
      <div class="column is-8">
        <div class="content">
          <!-- External links open in a new tab; rel="noopener noreferrer" guards
               the opener window. The CC license link now uses https. -->
          <p>
            This page was built using the <a href="https://github.com/eliahuhorwitz/Academic-project-page-template" target="_blank" rel="noopener noreferrer">Academic Project Page Template</a> which was adopted from the <a href="https://nerfies.github.io" target="_blank" rel="noopener noreferrer">Nerfies</a> project page.
            <br> This website is licensed under a <a rel="license noopener noreferrer" href="https://creativecommons.org/licenses/by-sa/4.0/" target="_blank">Creative
            Commons Attribution-ShareAlike 4.0 International License</a>.
          </p>
        </div>
      </div>
    </div>
  </div>
</footer>
<!-- Statcounter tracking code -->
<!-- You can add a tracker to track page visits by creating an account at statcounter.com -->
<!-- End of Statcounter Code -->
</body>
</html>