2020
-
J. Villagra, A. Artunedo, V. Trentin, and J. Godoy, “Interaction-aware risk assessment: focus on the lateral intention,” in 2020 IEEE 3rd Connected and Automated Vehicles Symposium (CAVS), 2020.
[Bibtex]
@InProceedings{Villagra2020_interaction-aware,
  author    = {Jorge Villagra and Antonio Artunedo and Vinicius Trentin and Jorge Godoy},
  title     = {Interaction-aware risk assessment: focus on the lateral intention},
  booktitle = {2020 IEEE 3rd Connected and Automated Vehicles Symposium (CAVS)},
  year      = {2020},
  publisher = {IEEE},
}
2019
-
J. F. Medina-Lee, V. Trentin, and J. Villagra, “Framework for motion prediction of vehicles in a simulation environment,” in Actas de las XL Jornadas de Automática, Ferrol, 4-6 de Septiembre de 2019, 2019, pp. 520–527.
[Bibtex]
@InProceedings{medina2019framework,
  author    = {J. F. Medina-Lee and Vinicius Trentin and Jorge Villagra},
  title     = {Framework for motion prediction of vehicles in a simulation environment},
  booktitle = {Actas de las XL Jornadas de Autom{\'a}tica, Ferrol, 4-6 de Septiembre de 2019},
  year      = {2019},
  pages     = {520--527},
  publisher = {Universidade da Coru{\~n}a},
  doi       = {10.17979/spudc.9788497497169.520},
  url       = {https://doi.org/10.17979/spudc.9788497497169.520},
}
-
V. Trentin, R. S. Guerra, and G. R. Librelotto, “Contradictions in assessing human morals and the ethical design of autonomous vehicles,” in XVI Latin American Robotics Symposium and VII Brazilian Robotics Symposium (LARS/SBR), Rio Grande, Brazil, Oct. 2019.
[Bibtex]
@InProceedings{Trentin2019,
  author    = {Vinicius Trentin and Rodrigo S. Guerra and Giovani Rubert Librelotto},
  title     = {Contradictions in assessing human morals and the ethical design of autonomous vehicles},
  booktitle = {XVI Latin American Robotics Symposium and VII Brazilian Robotics Symposium (LARS/SBR)},
  year      = {2019},
  address   = {Rio Grande, Brazil},
  publisher = {IEEE},
  date      = {2019-10-25},
  pubstate  = {published},
  abstract  = {Autonomous vehicles (AVs) promise to bring many benefits to society, such as safety and an increase in accessibility and quality of life, among others. Unlike humans, they do not get tired and, supposedly, do not fail. However, there might be cases where, due to limited visibility, occlusions, or even a sensor failure, the system might not be capable of detecting one or more obstacles along the vehicle's path early enough to avoid a crash. Although these situations might be rare if one considers a single vehicle, if predictions are correct, these AVs are to be adopted in large quantities in the near future, making even rare situations more commonplace. AVs will have to deal with these forced-choice situations in the best possible way. This paper presents a review of the ethical discussion regarding AVs and an analysis of a questionnaire implemented by the authors. Our results show evidence of several types of contradictory choices made by the subjects, which suggests that moral choices do not necessarily follow strict logical reasoning.},
}