From 0fab80d617a3bbdb95d0da7609cfc821d73cd06d Mon Sep 17 00:00:00 2001
From: RossTheRoss
Date: Thu, 11 Feb 2021 19:34:24 -0600
Subject: add to gitignore

---
 csci4511w/writing1.tex | 36 ++++++++++++++++++++++++++++++++++++
 1 file changed, 36 insertions(+)
 create mode 100644 csci4511w/writing1.tex

diff --git a/csci4511w/writing1.tex b/csci4511w/writing1.tex
new file mode 100644
index 0000000..f4ee136
--- /dev/null
+++ b/csci4511w/writing1.tex
@@ -0,0 +1,36 @@
+\documentclass{article}
+\usepackage[utf8]{inputenc}
+\usepackage{parskip}
+
+\title{Writing 1}
+\author{Matt Strapp}
+\date{2021-02-12}
+\begin{document}
+    \maketitle
+    \section*{Trust and Ethics for AVs}
+    One of the most important things in the study of ethics is its definition. Instead of using the definition supplied in the paper, I will define ethical behavior as follows:
+    \begin{quote}
+        \emph{Ethical behavior is behavior that, while not always beneficial to the individual, is beneficial to society as a whole.}
+    \end{quote}
+    While this definition is flawed, I will use it because I disagree with the one given in the paper and believe that a utilitarian standard is better suited to AI. Everything that follows is based on that opinion.
+
+    The paper gives four different definitions of safe behavior for robots:
+    \begin{quote}
+        \emph{(SN-0) A robot (or AI or AV) will never harm a human being.}
+    \end{quote}
+
+    \begin{quote}
+        \emph{(SN-1) A robot will never deliberately harm a human being.}
+    \end{quote}
+
+    \begin{quote}
+        \emph{(SN-2) In a given situation, a robot will be no more likely than a skilled and alert human
+        to accidentally harm a human being.}
+    \end{quote}
+
+    \begin{quote}
+        \emph{(SN-3) A robot must learn to anticipate and avoid Deadly Dilemmas.}
+    \end{quote}
+
+
+\end{document}
\ No newline at end of file
--
cgit v1.2.3