Yang, Jianming; Hisey, Rebecca; Bierbrier, Joshua; Fichtinger, Gabor; Law, Christine; Holden, Matthew
Frame Selection Methods to Streamline Surgical Video Annotation for Tool Detection Tasks Conference Forthcoming
IEEE, Forthcoming.
@conference{yang0000a,
title = {Frame Selection Methods to Streamline Surgical Video Annotation for Tool Detection Tasks},
author = {Jianming Yang and Rebecca Hisey and Joshua Bierbrier and Gabor Fichtinger and Christine Law and Matthew Holden},
publisher = {IEEE},
abstract = {Given the growing volume of surgical data and the
increasing demand for annotation, there is a pressing need to
streamline the annotation process for surgical videos. Previously,
annotation tools for object detection tasks have greatly evolved,
reducing time expense and enhancing ease. There are also many
initial frame selection approaches for Artificial Intelligence
(AI) assisted annotation tasks to further reduce human effort.
However, these methods have rarely been implemented and
reported in the context of surgical datasets, especially in cataract
surgery datasets. The identification of initial frames to annotate
before the use of any tools or algorithms determines annotation
efficiency. Therefore, in this paper, we chose to prioritize the
development of a method for selecting initial frames to facilitate
the subsequent automated annotation process. We propose a
customized initial frames selection method based on feature
clustering and compare it to commonly used temporal selection
methods. In each method, initial frames from cataract surgery
videos are selected to train a surgical tool detection model.
The model assists in the automated annotation process by
predicting bounding boxes for the surgery video objects in the
remaining frames. Evaluations of these methods are based on
how many edits users need to perform when annotating the
initial frames and how many edits users are expected to perform
to correct all predictions. Additionally, the total annotation cost
for each method is compared. Results indicate that on average,
the proposed cluster-based approach requires the fewest total
edits and exhibits the lowest total annotation cost compared
to conventional methods. These findings highlight a promising
direction for developing a complete application, featuring
streamlined AI-assisted annotation processes for surgical tool
detection tasks.},
keywords = {},
pubstate = {forthcoming},
tppubtype = {conference}
}
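The cluster-based initial frame selection described in the abstract above can be pictured with a short sketch. The paper does not specify its feature extractor or cluster count here, so the downsampled-pixel features, the sampling stride, and the helper name select_initial_frames below are illustrative assumptions rather than the authors' implementation: embed sampled frames, cluster the embeddings with k-means, and annotate the frame nearest each cluster centre first.

import cv2
import numpy as np
from sklearn.cluster import KMeans

def select_initial_frames(video_path: str, n_frames: int = 16,
                          sample_stride: int = 30) -> list[int]:
    """Pick representative frame indices from one surgical video."""
    cap = cv2.VideoCapture(video_path)
    features, indices = [], []
    idx = 0
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        if idx % sample_stride == 0:
            # Cheap stand-in for a learned embedding: a normalized,
            # downsampled copy of the frame, flattened to a vector.
            small = cv2.resize(frame, (32, 32)).astype(np.float32) / 255.0
            features.append(small.ravel())
            indices.append(idx)
        idx += 1
    cap.release()

    X = np.stack(features)
    k = min(n_frames, len(features))  # guard against short videos
    km = KMeans(n_clusters=k, n_init=10, random_state=0).fit(X)
    selected = []
    for c in range(k):
        # Keep the sampled frame whose embedding is closest to the
        # cluster centroid; these form the initial annotation set.
        members = np.where(km.labels_ == c)[0]
        dists = np.linalg.norm(X[members] - km.cluster_centers_[c], axis=1)
        selected.append(indices[members[int(dists.argmin())]])
    return sorted(selected)

A detector trained on these selected frames then pre-annotates the remaining frames, as the abstract describes.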
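The evaluation in the abstract counts annotator edits rather than detector accuracy. One plausible way to operationalize that count is sketched below; the IoU thresholds and the names iou and count_edits are assumptions of this sketch, not the paper's protocol. Unmatched predictions must be deleted, missed objects must be drawn from scratch, and loosely matched boxes must be adjusted.

import numpy as np

def iou(a, b):
    """IoU of two boxes given as [x1, y1, x2, y2]."""
    x1, y1 = max(a[0], b[0]), max(a[1], b[1])
    x2, y2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0.0, x2 - x1) * max(0.0, y2 - y1)
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    return inter / (area_a + area_b - inter + 1e-9)

def count_edits(preds, gts, match_thr=0.5, accept_thr=0.9):
    """Return (additions, deletions, adjustments) for one frame."""
    unmatched_gt = list(range(len(gts)))
    deletions = adjustments = 0
    for p in preds:
        scores = [iou(p, gts[g]) for g in unmatched_gt]
        if scores and max(scores) >= match_thr:
            g = unmatched_gt.pop(int(np.argmax(scores)))
            if iou(p, gts[g]) < accept_thr:
                adjustments += 1   # matched, but needs manual refinement
        else:
            deletions += 1         # spurious prediction must be removed
    additions = len(unmatched_gt)  # missed objects must be drawn
    return additions, deletions, adjustments

Summing these counts over the automatically annotated frames, plus the boxes drawn manually on the initial frames, yields a total-edit figure comparable across selection methods, which is how the abstract frames total annotation cost.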
Taylor, Russell H; Menciassi, Arianna; Fichtinger, Gabor; Dario, Paolo
Medical Robotics Journal Article
In: Systems, Research, vol. 52, iss. 6, pp. 1208, 0000.
@article{fichtinger0000c,
title = {Medical Robotics},
author = {Russell H Taylor and Arianna Menciassi and Gabor Fichtinger and Paolo Dario},
url = {https://scholar.google.ca/citations?view_op=view_citation&hl=en&user=_KxkI6UAAAAJ&cstart=300&pagesize=100&citation_for_view=_KxkI6UAAAAJ:N4u4nq0IxgcC},
journal = {Systems, Research},
volume = {52},
issue = {6},
pages = {1208},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Sorantin, Erich; Balogh, Emese; Bartroli, Anna Vilanova; Palagyi, Kalman; Nyul, Laszlo G; Loncaric, Sven; Subasic, Marko; Kovacevic, Domagoj; Dudasne-Nagy, Marianna; Mate, Eors; Kari, Bela; Fichtinger, Gabor; DeWeese, Theodore L; Patriciu, Alexandru; Tanacs, Attila; Mazilu, Dumitru; Anderson, James H; Masamune, Ken; Taylor, Russell H; Stoianovici, Dan; Kuba, Attila; Balogh, Emese; Nyul, Laszlo G; Falcao, Alexandre X; Udupa, Jayaram K; Palagyi, Kalman; Sorantin, Erich; Halmai, Csongor; Erdohelyi, Balazs; Palagyi, Kalman; Nyul, Laszlo G; Olle, Krisztian; Geiger, Bernhard; Lindbichler, Franz; Friedrich, Gerhard; Kiesler, Karl
Virtual Dissection of the Colon Journal Article
In: 3D Image Processing–Techniques and Clinical Applications, vol. 21, pp. 263-273, 0000.
@article{fichtinger0000d,
title = {Virtual Dissection of the Colon},
author = {Erich Sorantin and Emese Balogh and Anna Vilanova Bartroli and Kalman Palagyi and Laszlo G Nyul and Sven Loncaric and Marko Subasic and Domagoj Kovacevic and Marianna Dudasne-Nagy and Eors Mate and Bela Kari and Gabor Fichtinger and Theodore L DeWeese and Alexandru Patriciu and Attila Tanacs and Dumitru Mazilu and James H Anderson and Ken Masamune and Russell H Taylor and Dan Stoianovici and Attila Kuba and Emese Balogh and Laszlo G Nyul and Alexandre X Falcao and Jayaram K Udupa and Kalman Palagyi and Erich Sorantin and Csongor Halmai and Balazs Erdohelyi and Kalman Palagyi and Laszlo G Nyul and Krisztian Olle and Bernhard Geiger and Franz Lindbichler and Gerhard Friedrich and Karl Kiesler},
url = {https://scholar.google.com/scholar?cluster=955860295717033248&hl=en&oi=scholarr},
journal = {3D Image Processing–Techniques and Clinical Applications},
volume = {21},
pages = {263-273},
publisher = {Springer-Verlag},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Fichtinger, Gabor; Jain, Ameet; Mustufa, Tabish; Wyrobek, Keenan; Chirikjian, Greg; Zhou, Yu; Burdette, Everette C
Image registration of multiple medical imaging modalities using a multiple degree-of-freedom-encoded fiducial device Journal Article
In: 0000.
@article{fichtinger0000b,
title = {Image registration of multiple medical imaging modalities using a multiple degree-of-freedom-encoded fiducial device},
author = {Gabor Fichtinger and Ameet Jain and Tabish Mustufa and Keenan Wyrobek and Greg Chirikjian and Yu Zhou and Everette C Burdette},
url = {https://scholar.google.ca/citations?view_op=view_citation&hl=en&user=_KxkI6UAAAAJ&cstart=20&pagesize=80&citation_for_view=_KxkI6UAAAAJ:4fKUyHm3Qg0C},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Ungi, Tamas; Moult, Eric; Schwab, Joseph H.; Fichtinger, Gabor
Ultrasound Snapshots in Percutaneous Pedicle Screw Placement Navigation: A Feasibility Study Journal Article
In: Clinical Orthopaedics and Related Research, 2013.
@article{Ungi2013e,
title = {Ultrasound Snapshots in Percutaneous Pedicle Screw Placement Navigation: A Feasibility Study},
author = {Tamas Ungi and Eric Moult and Joseph H. Schwab and Gabor Fichtinger},
url = {https://labs.cs.queensu.ca/perklab/wp-content/uploads/sites/3/2024/02/Ungi2013e_WebVersion.pdf},
journal = {Clinical Orthopaedics and Related Research},
year = {2013},
keywords = {},
pubstate = {published},
tppubtype = {article}
}