% LaTeX 2.09 \documentstyle is obsolete; \documentclass (LaTeX2e) avoids
% compatibility mode and allows \usepackage if packages are added later.
%\documentclass[10pt,twoside]{article}
\documentclass[11pt,twoside]{article}
% Page geometry: widen the text block and shift margins for two-sided
% printing.  (The `geometry' package is the modern replacement for
% setting these lengths by hand.)
\setlength{\oddsidemargin}{0.25 in}
\setlength{\evensidemargin}{-0.25 in}
\setlength{\topmargin}{-0.6 in}
\setlength{\textwidth}{6.5 in}
\setlength{\textheight}{8.5 in}
\setlength{\headsep}{0.75 in}
% No paragraph indentation; paragraphs are separated by vertical space.
\setlength{\parindent}{0 in}
\setlength{\parskip}{0.1 in}
%
% The following commands set up the lecnum (lecture number)
% counter and make various numbering schemes work relative
% to the lecture number.
%
\newcounter{lecnum}
% Prefix page, section, equation, figure, and table numbers with the
% lecture number (e.g. page "3-1", Theorem 3.2).
% NOTE(review): nothing in this file ever sets `lecnum', so the prefix
% renders as "0" (page "0-1") — confirm this is intended for an exam
% handout derived from the lecture-notes template.
\renewcommand{\thepage}{\thelecnum-\arabic{page}}
\renewcommand{\thesection}{\thelecnum.\arabic{section}}
\renewcommand{\theequation}{\thelecnum.\arabic{equation}}
\renewcommand{\thefigure}{\thelecnum.\arabic{figure}}
\renewcommand{\thetable}{\thelecnum.\arabic{table}}
%
% The following macro is used to generate the header.
%
% \exam{TITLE}{OUT-DATE}{DUE-DATE}
% Typesets the boxed exam header: course/term line, centered title (#1),
% and out (#2) / due (#3) dates.  Also switches to the myheadings page
% style and resets the page counter to 1.
\newcommand{\exam}[3]{
\pagestyle{myheadings}
% The header page itself uses the plain style (no running head).
\thispagestyle{plain}
\newpage
\setcounter{page}{1}
\noindent
\begin{center}
\framebox{
\vbox{\vspace{2mm}
% Course name flush left, term flush right, on one fixed-width line.
\hbox to 6.28in { {\bf CS~151~~~Complexity Theory
\hfill Spring 2021} }
\vspace{4mm}
% Exam title, horizontally centered.
\hbox to 6.28in { {\Large \hfill #1 \hfill} }
\vspace{2mm}
% Out date flush left, due date flush right, in italics.
\hbox to 6.28in { {\it Out: #2 \hfill Due: #3} }
\vspace{2mm}}
}
\end{center}
\vspace*{4mm}
}
%
% Convention for citations is authors' initials followed by the year.
% For example, to cite a paper by Leighton and Maggs you would type
% \cite{LM89}, and to cite a paper by Strassen you would type \cite{S69}.
% (To avoid bibliography problems, for now we redefine the \cite command.)
%
% Local stand-in for \cite: typeset the key literally in brackets so the
% file compiles without running BibTeX.
\renewcommand{\cite}[1]{[#1]}
% NOTE(review): epsf is obsolete — graphicx (\includegraphics) is the
% modern replacement — but the \fig macro below depends on epsf's
% \epsfxsize/\epsfbox interface, so both would have to change together.
\input{epsf}
%Use this command for a figure; it puts a figure in wherever you want it.
%usage: \fig{NUMBER}{FIGURE-SIZE}{CAPTION}{FILENAME}
% \fig{NUMBER}{FIGURE-SIZE}{CAPTION}{FILENAME}
% Places the EPS file #4 at the current position (not a float), scaled
% to width #2, followed by a centered caption
% "Figure <lecnum>.#1: #3".  Uses the epsf interface loaded above.
\newcommand{\fig}[4]{
\vspace{0.2 in}
\setlength{\epsfxsize}{#2}
\centerline{\epsfbox{#4}}
\begin{center}
Figure \thelecnum.#1:~#3
\end{center}
}
% Use these for theorems, lemmas, proofs, etc.
% Theorem-like environments, all sharing the `theorem' counter, which
% resets with (and is prefixed by) lecnum — so results number as
% Theorem 3.1, Lemma 3.2, ... within a lecture.
\newtheorem{theorem}{Theorem}[lecnum]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{claim}[theorem]{Claim}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{definition}[theorem]{Definition}
% Proof environment: bold "Proof:" opener; closes with a right-aligned
% 2mm x 2mm filled rule as the end-of-proof marker.
\newenvironment{proof}{{\bf Proof:}}{\hfill\rule{2mm}{2mm}}
% Some useful equation alignment commands, borrowed from TeX
% Equation-alignment macros borrowed verbatim from plain TeX:
%   \eqalign    -- aligned multi-line equations inside display math;
%   \eqalignno  -- aligned equations with numbers set flush right;
%   \leqalignno -- same, with numbers set flush left.
% Kept byte-for-byte (they use @-protected internals, hence the
% \makeatletter guard); amsmath's align/align* is the modern equivalent.
\makeatletter
\def\eqalign#1{\,\vcenter{\openup\jot\m@th
\ialign{\strut\hfil$\displaystyle{##}$&$\displaystyle{{}##}$\hfil
\crcr#1\crcr}}\,}
\def\eqalignno#1{\displ@y \tabskip\@centering
\halign to\displaywidth{\hfil$\displaystyle{##}$\tabskip\z@skip
&$\displaystyle{{}##}$\hfil\tabskip\@centering
&\llap{$##$}\tabskip\z@skip\crcr
#1\crcr}}
\def\leqalignno#1{\displ@y \tabskip\@centering
\halign to\displaywidth{\hfil$\displaystyle{##}$\tabskip\z@skip
&$\displaystyle{{}##}$\hfil\tabskip\@centering
&\kern-\displaywidth\rlap{$##$}\tabskip\displaywidth\crcr
#1\crcr}}
\makeatother
% **** IF YOU WANT TO DEFINE ADDITIONAL MACROS FOR YOURSELF, PUT THEM HERE:
\begin{document}
\exam{Midterm}{April 29}{{\bf May 6 at 1pm}}
This is a midterm. Collaboration is not allowed. You may consult the course notes and the text (Papadimitriou), but not any other source or person. The full honor code guidelines can be found in the course syllabus.
Please attempt all problems. {\bf Please turn in your solutions via
Gradescope, by 1pm Los Angeles time on the due date.}
\medskip
\begin{enumerate}
\item Show that $\mbox{\bf coNEXP} \subseteq \mbox{\bf
NEXP}/(n+1)$. Here the ``$/(n+1)$'' means that the
nondeterministic machine takes exactly $(n+1)$ bits of advice on
an input of length $n$. Hint: use an idea similar to one you used
for problem 2 on Problem Set 2.
\item Let $f:\{0,1\}^n \rightarrow \{0,1\}$ be an arbitrary
function, and consider the following scenario involving two
parties, Alice and Bob. Alice is given an input $x$ for which
$f(x) = 0$ and Bob is given an input $y$ for which $f(y) = 1$.
They take turns sending bits to each other, and at the end of the
protocol they must announce an index $i$ between $1$ and $n$ on
which $x$ and $y$ differ, i.e., $x_i \ne y_i$. Formally, in the
first round Alice sends $A_1(x) = a_1$ to Bob; Bob sends $B_1(y,
a_1) = b_1$ to Alice; Alice sends $A_2(x, b_1) = a_2$; Bob sends
$B_2(y, a_1, a_2) = b_2$; Alice sends $A_3(x, b_1, b_2) = a_3$;
and so on. In odd steps Alice sends a message that depends on her
input $x$ and the messages she has received so far; in even steps
Bob sends a message that depends on his input $y$ and the messages
he has received so far. In the end, after $k$ rounds Alice
computes $R_A(x, b_1, b_2, \ldots, b_k)$ and Bob computes $R_B(y,
a_1, a_2, \ldots, a_k)$, and these final function evaluations
should both produce the desired index $i$, on which $x_i \ne y_i$.
The protocol must work for all pairs of inputs $x \in f^{-1}(0)$
and $y \in f^{-1}(1)$; the functions $A_i$ and $B_i$ together with
$R_A$ and $R_B$ define a {\em protocol for $f$}.
The {\em communication complexity} for $f$, denoted $C(f)$, is the
minimum, over all protocols for $f$, of the number of bits
exchanged during the protocol. Let $D(f)$ denote the minimum, over
all fan-in two $(\land, \lor, \neg)$ Boolean circuits that compute
$f$, of the depth of the circuit. Below you will prove the
startling fact that these two quantities are essentially the same!
\begin{enumerate}
\item Show that $C(f) \le c_1D(f)$, where $c_1$ is a constant that
does not depend on $f$. Hint: use induction on the depth of a
minimum-depth circuit for $f$.
\item Show that $D(f) \le c_2C(f)$, where $c_2$ is a constant that
does not depend on $f$. Hint: prove a stronger statement as
follows. For every set $X \subseteq f^{-1}(0)$ and $Y \subseteq
f^{-1}(1)$ we say that a {\em protocol for $f$ on $X, Y$} is a
protocol that is only required to work on input pairs $x \in X$
and $y \in Y$ (so a {\em protocol for $f$} as defined above is a
protocol for $f$ on $f^{-1}(0), f^{-1}(1)$). Define $C_{X, Y}(f)$
to be the minimum, over all protocols for $f$ on $X, Y$, of the
number of bits exchanged during the protocol. Prove that for all
$X \subseteq f^{-1}(0)$ and $Y \subseteq f^{-1}(1)$ there is a
circuit with depth at most $c_2C_{X, Y}(f)$ that outputs $0$ on
inputs $x \in X$ and $1$ on inputs $y \in Y$.
\end{enumerate}
\item A {\em branching program} is a directed acyclic graph with three distinguished nodes, called {\em start}, {\em accept}, and {\em reject}. Every node except {\em accept} and {\em reject} is labeled by a positive integer $i$, and has exactly two outgoing edges, one labeled ``0'' and the other
labeled ``1''. An input $x = x_1x_2\ldots x_n$ defines a path
from the start node as follows: at a node labeled $i$, we
follow the outgoing edge whose label coincides with bit
$x_i$ in the input. The path terminates at a sink node (which is either {\em accept} or {\em reject}) and the input is
accepted or rejected accordingly.
Recall that $\mbox{\bf L/poly}$ is the class of languages
decidable by a Turing machine in $O(\log n)$ space with
$\mbox{poly}(n)$ bits of advice. Show that $\mbox{\bf L/poly}$ is
exactly the class of languages decided by polynomial-size
branching programs.
\item Show that $\mbox{\bf NP} \subseteq \mbox{\bf BPP}$ implies
$\mbox{\bf NP} = \mbox{\bf RP}$. Hint: first use error reduction
to reduce the error probability of the $\mbox{\bf BPP}$ machine.
\item
\begin{enumerate}
\item Let $f$ be a family of one-way permutations, and let $b =
\{b_n\}$ be a hard bit for $f^{-1}$. Assume that both $f$ and $b$
are computable in polynomial time. Use $f$ and $b$ to describe a
language $L$ for which $L \in (\mbox{\bf NP} \cap \mbox{\bf coNP})
- \mbox{\bf BPP}$.
(This shows that the assumption we used to construct the BMY
pseudo-random generator placed {\em a priori} bounds on the power
of $\mbox{\bf BPP}$ --- it presumed that $\mbox{\bf BPP}$ was not
powerful enough to simulate $\mbox{\bf NP} \cap \mbox{\bf coNP}$.)
\item Fix a constant $\delta$, and let $g = \{g_n\}$ be a uniform
family of functions for which:
\begin{itemize}
\item each $g_n$ maps $t = O(\log n)$ bits to $m = n^{\delta}$
bits, and is computable in poly($n$) time, and
\item for all circuits $C:\{0,1\}^m \rightarrow \{0,1\}$ of size
at most $m$,
\[\left |\Pr_{y \in \{0,1\}^m}[C(y) = 1] - \Pr_{z \in \{0,1\}^t}[C(g_n(z))=1]\right | < 1/6.\]
\end{itemize}
Use $g$ to describe a language $L \in \mbox{\bf E}$ which does not
have circuits of size $2^{\epsilon n}$, for some constant
$\epsilon > 0$. Hint: refer to a function family obtained by
truncating the output of $g$ to $t+1$ bits.
(Notice that $g$ is a ``Nisan-Wigderson style'' pseudo-random
generator, which we were able to construct based on the assumption
that there is some language in $\mbox{\bf E}$ that does not have
circuits of size $2^{\epsilon n}$ for some constant $\epsilon$.
This problem shows that this assumption is also {\em necessary}
for the existence of such generators.)
\end{enumerate}
\end{enumerate}
% If you need to add references, use the following format:
%\section*{References}
%
%\begin{itemize}
%\item[CW87] {\sc D.~Coppersmith} and {\sc S.~Winograd},
%Matrix multiplication via arithmetic progressions,
%{\it Proceedings of the 19th ACM Symposium on Theory of Computing},
%1987, pp.~1--6.
%
%\item[S69] {\sc V.~Strassen}, Gaussian Elimination Is Not Optimal,
%{\it Numerische Mathematik\/~\bf13}, 1969, pp.~354--356.
%
%\item[P84] {\sc V.~Pan}, {\it How To Multiply Matrices Faster},
%Springer-Verlag, Lecture Notes in Computer Science Vol.~179, 1984.
%
%\end{itemize}
\end{document}