@article{SudarevicTroyaFuchsetal.2023,
  author   = {Sudarevic, Boban and Troya, Joel and Fuchs, Karl-Hermann and Hann, Alexander and Vereczkei, Andras and Meining, Alexander},
  title    = {Design and development of a flexible {3D}-printed endoscopic grasping instrument},
  journal  = {Applied Sciences},
  volume   = {13},
  number   = {9},
  pages    = {5656},
  issn     = {2076-3417},
  doi      = {10.3390/app13095656},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-319186},
  year     = {2023},
  abstract = {(1) Background: Interventional endoscopic procedures are growing more popular, requiring innovative instruments and novel techniques. Three-dimensional printing has demonstrated great potential for the rapid development of prototypes that can be used for the early assessment of various concepts. In this work, we present the development of a flexible endoscopic instrument and explore its potential benefits. (2) Methods: The properties of the instrument, such as its maneuverability, flexibility, and bending force, were evaluated in a series of bench tests. Additionally, the effectiveness of the instrument was evaluated in an ex vivo porcine model by medical experts, who graded its properties and performance. Furthermore, the time necessary to complete various interventional endoscopic tasks was recorded. (3) Results: The instrument achieved bending angles of {\textpm}216{\textdegree} while achieving a bending force of 7.85 ({\textpm}0.53) Newtons. The time needed to reach the operating region was 120 s median, while it took 70 s median to insert an object in a cavity. Furthermore, it took 220 s median to insert the instrument and remove an object from the cavity. (4) Conclusions: This study presents the development of a flexible endoscopic instrument using three-dimensional printing technology and its evaluation. The instrument demonstrated high bending angles and forces, and superior properties compared to the current state of the art. Furthermore, it was able to complete various interventional endoscopic tasks in minimal time, thus potentially leading to the improved safety and effectiveness of interventional endoscopic procedures in the future.},
  language = {en},
}

@article{KrenzerBanckMakowskietal.2023,
  author   = {Krenzer, Adrian and Banck, Michael and Makowski, Kevin and Hekalo, Amar and Fitting, Daniel and Troya, Joel and Sudarevic, Boban and Zoller, Wolfram G. and Hann, Alexander and Puppe, Frank},
  title    = {A real-time polyp-detection system with clinical application in colonoscopy using deep convolutional neural networks},
  journal  = {Journal of Imaging},
  volume   = {9},
  number   = {2},
  pages    = {26},
  issn     = {2313-433X},
  doi      = {10.3390/jimaging9020026},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-304454},
  year     = {2023},
  abstract = {Colorectal cancer (CRC) is a leading cause of cancer-related deaths worldwide. The best method to prevent CRC is with a colonoscopy. During this procedure, the gastroenterologist searches for polyps. However, there is a potential risk of polyps being missed by the gastroenterologist. Automated detection of polyps helps to assist the gastroenterologist during a colonoscopy. There are already publications examining the problem of polyp detection in the literature. Nevertheless, most of these systems are only used in the research context and are not implemented for clinical application. Therefore, we introduce the first fully open-source automated polyp-detection system scoring best on current benchmark data and implementing it ready for clinical application. To create the polyp-detection system (ENDOMIND-Advanced), we combined our own collected data from different hospitals and practices in Germany with open-source datasets to create a dataset with over 500,000 annotated images. ENDOMIND-Advanced leverages a post-processing technique based on video detection to work in real-time with a stream of images. It is integrated into a prototype ready for application in clinical interventions. We achieve better performance compared to the best system in the literature and score a F1-score of 90.24\% on the open-source CVC-VideoClinicDB benchmark.},
  language = {en},
}

@article{KrenzerMakowskiHekaloetal.2022,
  author   = {Krenzer, Adrian and Makowski, Kevin and Hekalo, Amar and Fitting, Daniel and Troya, Joel and Zoller, Wolfram G. and Hann, Alexander and Puppe, Frank},
  title    = {Fast machine learning annotation in the medical domain: a semi-automated video annotation tool for gastroenterologists},
  journal  = {BioMedical Engineering OnLine},
  volume   = {21},
  number   = {1},
  doi      = {10.1186/s12938-022-01001-x},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-300231},
  year     = {2022},
  abstract = {Background Machine learning, especially deep learning, is becoming more and more relevant in research and development in the medical domain. For all the supervised deep learning applications, data is the most critical factor in securing successful implementation and sustaining the progress of the machine learning model. Especially gastroenterological data, which often involves endoscopic videos, are cumbersome to annotate. Domain experts are needed to interpret and annotate the videos. To support those domain experts, we generated a framework. With this framework, instead of annotating every frame in the video sequence, experts are just performing key annotations at the beginning and the end of sequences with pathologies, e.g., visible polyps. Subsequently, non-expert annotators supported by machine learning add the missing annotations for the frames in-between. Methods In our framework, an expert reviews the video and annotates a few video frames to verify the object's annotations for the non-expert. In a second step, a non-expert has visual confirmation of the given object and can annotate all following and preceding frames with AI assistance. After the expert has finished, relevant frames will be selected and passed on to an AI model. This information allows the AI model to detect and mark the desired object on all following and preceding frames with an annotation. Therefore, the non-expert can adjust and modify the AI predictions and export the results, which can then be used to train the AI model. Results Using this framework, we were able to reduce workload of domain experts on average by a factor of 20 on our data. This is primarily due to the structure of the framework, which is designed to minimize the workload of the domain expert. Pairing this framework with a state-of-the-art semi-automated AI model enhances the annotation speed further. Through a prospective study with 10 participants, we show that semi-automated annotation using our tool doubles the annotation speed of non-expert annotators compared to a well-known state-of-the-art annotation tool. Conclusion In summary, we introduce a framework for fast expert annotation for gastroenterologists, which reduces the workload of the domain expert considerably while maintaining a very high annotation quality. The framework incorporates a semi-automated annotation system utilizing trained object detection models. The software and framework are open-source.},
  language = {en},
}

@phdthesis{Loho2005,
  author   = {Loho, Etienne},
  title    = {Einsatz starrer Endoskope zur Entfernung von Fremdk{\"o}rpern der Luft- und oberen Speisewege},
  school   = {Universit{\"a}t W{\"u}rzburg},
  year     = {2005},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-12607},
  abstract = {Der Einsatz starrer Endoskope zur Entfernung von Fremdk{\"o}rpern der Luft- und oberen Speisewege ist Thema dieser retrospektiven Studie, die sich ferner mit endoskopischen Alternativverfahren auseinandersetzt. Da bei einer Vielzahl von Patienten, die wegen Fremdk{\"o}rperverdachts {\"o}sophageal und/oder tracheobronchial endoskopiert wurden, keine Fremdk{\"o}rperpersistenz nachgewiesen werden konnten, soll die Frage nach den f{\"u}r dieses Ph{\"a}nomen verantwortlichen Faktoren gekl{\"a}rt werden. Die Anamnese, klinischer Befund und die Gegen{\"u}berstellung von Verdachtsdiagnose und endg{\"u}ltiger (postoperativer) Diagnose soll {\"a}tiologische Zusammenh{\"a}nge kl{\"a}ren und zeigt konkrete Anwendungsm{\"o}glichkeiten der gewonnenen Erkenntnisse auf.},
  language = {de},
}