Commit ebbfb547 authored by Antonin Dudermel's avatar Antonin Dudermel
Browse files

Merge branch 'main' of gitlab.aliens-lyon.fr:aduderme/rapport-stage-m2 into main

parents 0c144527 94e96f40
@misc{2015ieee17882015,
  title        = {{{IEEE}} 1788-2015 - {{IEEE Standard}} for {{Interval Arithmetic}}},
  year         = {2015},
  howpublished = {https://standards.ieee.org/standard/1788-2015.html},
  file         = {/home/antonin/Zotero/storage/HNGLDC3Q/1788-2015.html}
}
@article{2019digitalbiquad,
title = {Digital Biquad Filter},
year = {2019},
......@@ -11,15 +18,15 @@
language = {en}
}
@article{2021ieee754,
  title = {{{IEEE}} 754},
  year = {2021},
  month = apr,
  abstract = {The IEEE Standard for Floating-Point Arithmetic (IEEE 754) is a technical standard for floating-point arithmetic established in 1985 by the Institute of Electrical and Electronics Engineers (IEEE). The standard addressed many problems found in the diverse floating-point implementations that made them difficult to use reliably and portably. Many hardware floating-point units use the IEEE 754 standard. The standard defines: arithmetic formats: sets of binary and decimal floating-point data, which consist of finite numbers (including signed zeros and subnormal numbers), infinities, and special "not a number" values (NaNs) interchange formats: encodings (bit strings) that may be used to exchange floating-point data in an efficient and compact form rounding rules: properties to be satisfied when rounding numbers during arithmetic and conversions operations: arithmetic and other operations (such as trigonometric functions) on arithmetic formats exception handling: indications of exceptional conditions (such as division by zero, overflow, etc.)IEEE 754-2008, published in August 2008, includes nearly all of the original IEEE 754-1985 standard, plus the IEEE 854-1987 Standard for Radix-Independent Floating-Point Arithmetic. The current version, IEEE 754-2019, was published in July 2019. It is a minor revision of the previous version, incorporating mainly clarifications, defect fixes and new recommended operations.},
  annotation = {Page Version ID: 1016159052},
  copyright = {Creative Commons Attribution-ShareAlike License},
  journal = {Wikipedia},
  language = {en}
}
@article{2019ieee7542019,
  title = {{{IEEE}} 754-2019 - {{IEEE Standard}} for {{Floating}}-{{Point Arithmetic}}},
  year = {2019},
  journal = {IEEE Std 754-2019 (Revision of IEEE 754-2008)},
  pages = {1--84},
  doi = {10.1109/IEEESTD.2019.8766229},
  abstract = {This standard specifies interchange and arithmetic formats and methods for binary and decimal floating-point arithmetic in computer programming environments. This standard specifies exception conditions and their default handling. An implementation of a floating-point system conforming to this standard may be realized entirely in software, entirely in hardware, or in any combination of software and hardware. For operations specified in the normative part of this standard, numerical results and exceptions are uniquely determined by the values of the input data, sequence of operations, and destination formats, all under user control.},
  keywords = {arithmetic,binary,computer,decimal,exponent,floating-point,Floating-point arithmetic,format,IEEE 754,IEEE Standards,interchange,NaN,number,rounding,significand,subnormal},
  file = {/home/antonin/Zotero/storage/7YXEVL98/8766229.html}
}
@article{adje2021fastefficient,
......@@ -78,13 +85,22 @@
file = {/home/antonin/Zotero/storage/3GGBLLIE/CousotCousot-POPL-77-ACM-p238--252-1977.pdf}
}
@article{dedinechin2011certifyingfloatingpoint,
  title = {Certifying the Floating-Point Implementation of an Elementary Function Using {{Gappa}}},
  author = {{de Dinechin}, Florent and Lauter, Christoph and Melquiond, Guillaume},
  year = {2011},
  month = feb,
  journal = {IEEE Transactions on Computers},
  volume = {60},
  number = {2},
  pages = {242--253},
  publisher = {{IEEE}},
  doi = {10.1109/TC.2010.128},
  abstract = {High confidence in floating-point programs requires proving numerical properties of final and intermediate values. One may need to guarantee that a value stays within some range, or that the error relative to some ideal value is well bounded. Such work may require several lines of proof for each line of code, and will usually be broken by the smallest change to the code (e.g. for maintenance or optimization purpose). Certifying these programs by hand is therefore very tedious and error-prone. This article discusses the use of the Gappa proof assistant in this context. Gappa has two main advantages over previous approaches: Its input format is very close to the actual C code to validate, and it automates error evaluation and propagation using interval arithmetic. Besides, it can be used to incrementally prove complex mathematical properties pertaining to the C code. Yet it does not require any specific knowledge about automatic theorem proving, and thus is accessible to a wide community. Moreover, Gappa may generate a formal proof of the results that can be checked independently by a lower-level proof assistant like Coq, hence providing an even higher confidence in the certification of the numerical code. The article demonstrates the use of this tool on a real-size example, an elementary function with correctly rounded output.},
  file = {/home/antonin/Zotero/storage/NBYNQ7A5/de Dinechin et al. - Certifying floating-point implementations using Gap.pdf},
  language = {en},
  nourl = {http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=5483294},
  url-hal = {http://hal.inria.fr/inria-00533968/en},
  x-editorial-board = {yes},
  x-id-hal = {inria-00533968},
  x-international-audience = {yes}
}
@inproceedings{dinechin2021arithmeticcenteredfilter,
......@@ -132,12 +148,6 @@
number = {1}
}
@misc{ieee17882015,
  title = {{{IEEE}} 1788-2015 - {{IEEE Standard}} for {{Interval Arithmetic}}},
  year = {2015},
  file = {/home/antonin/Zotero/storage/HNGLDC3Q/1788-2015.html},
  howpublished = {https://standards.ieee.org/standard/1788-2015.html}
}
@misc{inriaforgempfi,
title = {{{InriaForge}}: {{MPFI}}: {{Project Home}}},
file = {/home/antonin/Zotero/storage/PAK6NUNG/mpfi.html},
......@@ -267,6 +277,17 @@
language = {en}
}
@article{ugurdag2017hardwaredivision,
  author      = {Ugurdag, H. Fatih and {de Dinechin}, Florent and Gener, Y. Serhan and G{\"o}ren, Sezer and Didier, Laurent-St{\'e}phane},
  title       = {Hardware Division by Small Integer Constants},
  journal     = {IEEE Transactions on Computers},
  year        = {2017},
  volume      = {66},
  number      = {12},
  pages       = {2097--2110},
  nopublisher = {IEEE}
}
@book{vetterli2014foundationssignal,
title = {Foundations of Signal Processing},
author = {Vetterli, Martin and Kova{\v c}evi{\'c}, Jelena and Goyal, Vivek K.},
......
process = _ <: *(15), _ <: <(12), *(4), >=(12), *(.25) + .75: *, * :> _ ;
process = _ <: *(15), _ <:
<(12), *(4), >=(12), *(.25) + .75:
*, * :> _ ;
@rnd = float<ieee_32, ne>;
x = rnd(xx);
t rnd= (1 - x);
y rnd= x * t;
z = x * (1 - x);
@rnd = float<ieee_32, ne>; # definition of a rounding mode
x = rnd(xx); # the input value itself is a FP number
t rnd= (1 - x); # one intermediate value of the code
y rnd= x * t; # another line describing what is computed
z = x * (1 - x); # this is the mathematical ideal value
# without rounding
{ x in [0, 1] -> y in ? /\ z - y in ? }
{ x in [0, 1] -> y in ? /\ z - y in ? } # the theorem to prove
z -> 0.25 - (x - 0.5) * (x - 0.5);
\documentclass{beamer}
\documentclass{beamer}
\addtobeamertemplate{navigation symbols}{}{%
\usebeamerfont{footline}%
......@@ -49,8 +49,8 @@
\subtitle{M2 internship Report}
\author%[Ad,Bc] % (optional, use only with lots of authors)
{Antonin Dudermel\inst{1} \\ supervised by\\ Florent de Dinechin\inst{2} \and Stéphane
Letz\inst{3} \and Yann Orlarey\inst{3} \and Tanguy Risset\inst{2} }
{Antonin Dudermel\inst{1} \\ supervised by\\ Florent de Dinechin\inst{2} \and
Stéphane Letz\inst{3} \and Yann Orlarey\inst{3} \and Tanguy Risset\inst{2} }
% - Use the \inst{?} command only if the authors have different
% affiliation.
......
......@@ -22,6 +22,7 @@
\newcommand{\rnd}{\circ}
\newcommand{\op}{\diamond}
\newcommand{\Ops}{\text{Ops}}
\newcommand{\fq}{\nu}
\newcommand{\ninf}[1]{\left\| #1 \right\|_\infty}
\newcommand{\ens}[1]{\left\{ #1 \right\}}
......
......@@ -30,25 +30,27 @@
}
\newcommand{\computeraudio}{
\begin{tikzpicture}[every node/.style={draw, inner sep=6}]
\begin{scope}[local bounding box=hp]
\hp
\end{scope}
\begin{scope}[shift={(1,.4)}, local bounding box=mic]
\mic
\end{scope}
\fill (mic.south) circle (1pt);
\fill (hp.south) circle (1pt);
\node[draw] (SC) at ($(mic.south) + (2, -0.2)$) {Sound Card};
\node (K) at ($(SC.east) + (1.5, 0)$) {Kernel};
\node (P) at ($(K.east) + (1.5, 0)$) {CPU};
\node (M) at ($(P.east) + (1.5, 0)$) {RAM};
\draw[->-] (mic.south) -- ($(SC.west) + (0, 0.2)$);
\draw[->-] ($(SC.west) + (0, -0.2)$) -- (hp.south);
\lr{SC}{K}
\lr{K}{P}
\lr{P}{M}
\end{tikzpicture}
\begin{tikzpicture}[every node/.style={inner sep=6}]
\begin{scope}[local bounding box=hp]
\hp
\end{scope}
\begin{scope}[shift={(1,.4)}, local bounding box=mic]
\mic
\end{scope}
\fill (mic.south) circle (1pt);
\fill (hp.south) circle (1pt);
\node[draw] (SC) at ($(mic.south) + (2, -0.2)$) {Sound Card};
\node[draw] (K) at ($(SC.east) + (3, 0)$) {Kernel};
\node[draw] (P) at ($(K.east) + (1.5, 0)$) {OS};
\draw[->-] (mic.south) -- ($(SC.west) + (0, 0.2)$);
\draw[->-] ($(SC.west) + (0, -0.2)$) -- (hp.south);
\foreach \y in {1, ..., 8}{
\draw (SC.north east) rectangle ($(SC.south east) + (\y / 10, 0)$);
}
\draw ($(SC.north east) + (0.4, 0.2)$) node {\tiny buffer};
\lr{SC}{K}
\lr{K}{P}
\end{tikzpicture}
}
\newcommand{\asicaudio}{
......
\documentclass[report.tex]{subfiles}
\begin{document}
\section{Annex}
\section{Appendix}
\label{sec:annex}
\subsection{Real-life Floating-Point intervals}
\subsection{Faust implementation technicalities}
\label{sec:real-life-floating}
Some details were hidden when presenting the lattice used in the Faust
......@@ -17,73 +17,41 @@ number of floats, so this lattice is technically also finite, but from an
implementation point of view, a lattice with \(2^{64}\) elements is as good as
infinite.
This has however some useful implications. Going back to the smoother
example~\figref{fig:smooth}, assuming no widening happens until step 53, the value
of the interval \(Y(53)\) is \([-2 + 2^{-52}, 2 - 2^{-52}]\), and the value of
\(Y(54)\) is
\(Y(54) = Y(53) \vee F(Y(53)) = F(Y(53)) = [-1, 1] + [-1 + 2^{-52}, 1 - 2^{-52}]\).
But \(Y(54)\) cannot be equal to \([-2 + 2^{-53}, 2 - 2^{-53}]\): both of these
numbers would have to be represented with a 54-bit mantissa, which is too big for
double-precision floating-point numbers. Focusing on the upper bound, to get
\(Y(54)\) we compute \(1 + (1 - 2^{-53})\). Due to rounding, its result is 2, as
shown in \figref{fig:float-plus}. So we have the following trace
\begin{figure}
\centering
\begin{tikzpicture}[scale=0.5]
\fixpointpositions{0}{1}{-8}{-7.5}
% \begin{figure}
% \centering
% \begin{tikzpicture}[scale=0.5]
% \fixpointpositions{0}{1}{-8}{-7.5}
\fixpointnumber{0}{1}{1}{-7.5}
\draw (-3, -6.55) -- (8, -6.55);
\draw (-2.5,-5.8) node{\(+\)};
\draw (7, -5.3) \drawbit[fill=green];
\fixpointnumber{0}{0}{-7}{-6}
% \fixpointnumber{0}{1}{1}{-7.5}
% \draw (-3, -6.55) -- (8, -6.55);
% \draw (-2.5,-5.8) node{\(+\)};
% \draw (7, -5.3) \drawbit[fill=green];
% \fixpointnumber{0}{0}{-7}{-6}
\draw (-3, -4.05) -- (8, -4.05);
\draw (8, -3.8) \drawbit;
\draw (-2.5,-3.5) node{\(+\)};
\fixpointnumber{0}{-1}{-7}{-3.5}
\fixpointnumber{0}{0}{0}{-2.5}
% \draw (-3, -4.05) -- (8, -4.05);
% \draw (8, -3.8) \drawbit;
% \draw (-2.5,-3.5) node{\(+\)};
% \fixpointnumber{0}{-1}{-7}{-3.5}
% \fixpointnumber{0}{0}{0}{-2.5}
\draw (-3, -1.55) -- (8, -1.55);
\fixpointnumber{0}{-1}{-8}{-1}
\fixpointnumber{0}{0}{0}{0}
% \draw (-3, -1.55) -- (8, -1.55);
% \fixpointnumber{0}{-1}{-8}{-1}
% \fixpointnumber{0}{0}{0}{0}
\draw[->] (7.5, -3.3) -- (6.5, -4.8);
% \draw[->] (7.5, -3.3) -- (6.5, -4.8);
\draw (9, -1) node[anchor=west] {align and truncate mantiss\ae};
\draw (9, -3.5) node[anchor=west] {perform the addition};
\draw (9, -6) node[anchor=west] {final rounding};
% \draw (9, -1) node[anchor=west] {align and truncate mantiss\ae};
% \draw (9, -3.5) node[anchor=west] {perform the addition};
% \draw (9, -6) node[anchor=west] {final rounding};
\end{tikzpicture}
\caption{Floating-point inexact addition}
\label{fig:float-plus}
\end{figure}
\[
\begin{array}{|r||c|c|c|c|c|}
\hline
t & c_- & c_+ & I = Y(t) & I' = [-1, 1] + [1/2] * S(t) & S(t) \vee F(S(t))\\
\hline
\dots & \dots & \dots & \dots & \dots & \dots \\
53 & 53 & 53 & [-2 + 2^{-52}, 2 - 2^{-52}] & [-2, 2] & [-2 , 2]\\
54 & 54 & 54 & [-2, 2] & [-2, 2] & [-2, 2] \\
\hline
\end{array}
\]
And the algorithm terminates with a tight answer, whereas without widening, it
would not terminate at all. However, we are very lucky in this case. Changing
the smoothing constant to get a smoother filter, \(0.999\) for instance as in
the standard library, we would have needed \(\lceil -53/\log(0.999)\rceil = 36719\)
iterations.
% \end{tikzpicture}
% \caption{Floating-point inexact addition}
% \label{fig:float-plus}
% \end{figure}
% This has however some strange implications. Going back on the ramp
% example~\figref{fig:ramp}. When outputting C++, the Faust compiler represents the
% integers as ints (\cint{} in C \ie{} \(\sfix(31,0)\)), so there is in fact
% integers as ints (\cint{} in C \ie \(\sfix(31,0)\)), so there is in fact
% a\(\mod 2^{32}\) operator hidden inside the \texttt{+}. As result, the signal
% is bounded by \([-2^{31}, 2^{31}-1]\). But looking at intervals without widening
% is also really instructive on this example. At step \(2^{27} = 134217728\), the
......@@ -106,34 +74,6 @@ iterations.
% \label{fig:float-plus}
% \end{figure}
\subsection{Lattices on Signals}
\label{sec:lattice-signals}
Another problem lies in signals. Indeed, in signal processing, every signal
has value \(0\) before the time \(0\). For instance, the ``constant'' signal
\(5\) is in fact the signal
\[
\left\{
\begin{array}{lr}
s(t) = 0 & t < 0 \\
s(t) = 5 & t \geq 0
\end{array}
\right.
\]
so its interval should be \([0, 5]\) and not \([5,5]\). So, on signals, the
lattice considered should not be the lattice of floating-point intervals, but
the lattice of floating-point intervals containing \(0\) (one just needs to
replace \(\bot := \emptyset\) by \(\bot := [0, 0]\)). However, this is not very
convenient. For instance, the expression \(1 / 5\) should be translated in
interval as \([0, 1] / [0, 5]\) id est \([0, 1] \times [1/5, 1/0]\) which is by
convention \([0, +\infty]\). The choice has been made to keep the lattice of
floating-point intervals for constant signals, and other signals such as sliders
output. As a result, the lattice used in Faust is in fact the Cartesian product of
floating-point interval lattices and floating-point interval containing zero
lattices.
\subsection{Lattice Proofs}
\label{sec:lattice-proofs}
......@@ -150,7 +90,7 @@ Take \(F\) increasing having a post-fixpoint \(Y\). We will prove by induction
fixpoint, we have \(F^{(t+1)}(\bot) \leq F(Y) \leq Y\).
\end{itemize}
So \(\forall t\in\Nat, F^{(t)}(\bot) \leq Y\) \ie{}
So \(\forall t\in\Nat, F^{(t)}(\bot) \leq Y\) \ie
\(\bigvee_{t\in\Nat} F^{(t)}(\bot) \leq Y\) \qed
\subsubsection*{Proof of theorem \ref{th:down}}
......@@ -161,22 +101,14 @@ By case disjunction:
\begin{itemize}
\item If \(N < M\), we can show as above that for any \(t\),
\(F^{(t)}(\bot) \leq F^{(m - n + t)}(\top)\), particularily for \(t = n\)
\item If \(N \geq{} M\), similarily for any \(t\),
\(F^{(n - m + t)}(\bot) \leq F^{(t)}(\top)\), particularily for \(t = m\)
\(F^{(t)}(\bot) \leq F^{(m - n + t)}(\top)\), particularly for \(t = n\)
\item If \(N \geq{} M\), similarly for any \(t\),
\(F^{(n - m + t)}(\bot) \leq F^{(t)}(\top)\), particularly for \(t = m\)
\end{itemize}\qed
\section*{Copyright}
\section*{Acknowledgement}
\label{sec:copy}
\begin{itemize}
\item The picture of the \figref{fig:zybo} is taken from Digilent Website:\\
https://reference.digilentinc.com/\_media/reference/programmable-logic/zybo/zybo-2.png
\item The \figref{fig:bell-impl} is made by Emma Neiss
\item Figures \ref{fig:difffix},\ref{fig:sfix} are made by Florent de Dinechin
\end{itemize}
Figures \ref{fig:bell-impl}, \ref{fig:difffix},\ref{fig:sfix} are made by
Florent de Dinechin
\end{document}
%%% Local IspellDict: en
......@@ -4,9 +4,9 @@
\section{Conclusion and Future Work}
\label{sec:conclusion}
During this internship we have been devising algorithm to allow the compilation
During this internship we have been devising algorithms to allow the compilation
of Faust programs on targets using fixed-point arithmetic. As fixed-point
formats are very specific, except from beeing drastically suboptimal, one has to
formats are very specific, to avoid being drastically suboptimal, one has to
choose one fixed-point format (MSB and LSB indexes) per signal. To determine the
MSB index of each signal, we proposed and implemented a bounding method based on
the abstract interpretation of the program on the interval lattice. Yet, as
......@@ -17,29 +17,42 @@ an operator type and the LSB of its inputs and outputs were drastically
suboptimal, as even on small examples, the optimal LSB index was depending on
the implemented processor.
\todo{Résumer le tout de manière compacte, parler de ce qu'on n'a pas utilisé,
les unités, une approche top-down avec la précision des sliders et les erreurs
de quantification en entrée, approche statistique avec papiers de D. Ménard}
\subsection{Short-term Perspectives}
\label{sec:short-term-persp}
We have shown that finding an optimal pair (MSB, LSB) on each signal is
something relatively hard. Yet, if we want to compile on FPGA, we have to make a
pragmatic choice and accept less optimal values. The first very naive
idea is to set everything in \(\sfix(0, -23)\). There might be overflows, there
is no guarantee in accuracy, but the datapath is constant. If we are confident
on the MSB computed, we can refine this a little more giving
\(\sfix(\msb_s,\msb_s - 23)\) for each signal \(s(t)\) with computed MSB
\({msb_s}\), falling back on a default value if the MSB computation fails, or
indicating to the user where an annotation is needed. We still have a constant
datapath, but we no longer risk overflow, besides, we increase accuracy for
signals bounded by values smaller than \(1/2\).
If there is no interest in the accuracy of the result, the scale-to-worse pass
may be an interesting option: by implementing static floating-point
without exponent, we already save 8 bits per signal, plus silicon on
complex circuits that handle exponent cases.
\subsection{Medium-term Perspectives}
\label{sec:medi-term-persp}
Our upper bounds are for the moment far from being satisfying and useable in
Our upper bounds are for the moment far from being satisfying and usable in
practice. Yet, each improvement on the interval library and on the normal forms
will also improve the abstract analysis. So there is hope that the new interval
arithmetic library actually in development by the Faust team will improve the
upper bound. Similarily, there might be interesting work to be done on the
upper bound. Similarly, there might be interesting work to be done on the
normal form concerning delays. For the moment \lstinline{1' + 2'} (the constant
signal 1 delayed by one sample (\ie{} the signal \(s(0) = 0, s(t) = 1, t > 0\)
signal 1 delayed by one sample (\ie the signal \(s(0) = 0, s(t) = 1, t > 0\)
plus the constant signal 2 delayed by one sample) is equivalent to the signals
\lstinline{(1 + 2)'} or \lstinline{3'} (the constant signal 3 delayed by one
sample) but for the moment, the normal form does not handle those cases.
An improvement concerning abstract interpretation is the conditional
interpretation. For instance, in the compressor \figref{fig:comp} example, we
interpretation. For instance, in the compressor example \figref{fig:comp}, we
have a signal which is in substance \lstinline{(15 * x < 12) * 2 * x }, which
means in signal, ``if \(15x < 12\), then return \(2*x\) else 0'', so on this
signal, we get an additional information on the signal x, which is
......@@ -48,39 +61,36 @@ compressor output is bounded by \(1\).
Finally, we stated that there were necessarily impossible cases to define
upper bounds, which require manual annotations of the code, yet we did not ask
ourselves if those cases were real-life usescases or only useless mathematical
ourselves if those cases were real-life usecases or only useless mathematical
counterexamples. A good question to answer would be to know which are the mainly
used processors on which we need to concentrate our efforts (we already know
that filters are very important (smooth and biquads), ramps (to scan an array),
linear operators (in additive synthesis, such as the bell), long delays also (for
echoes), but are there others ?)
\todo{se demander quelles sont les choses importantes dans Faust: est-ce qu'on
touche à peu près tout avec filtres + échos + op linéaires}
echoes), but are there others?)
\subsection{Long-term Perspectives}
\label{sec:long-term-persp}
\todo{détection de filtres (ODIN), parler du problème des filtres à écho et des
filtres à sliders} \todo{ILP, approche statistiques}
\subsubsection{External tools for pattern design}
\label{sec:extern-tools-patt}
The aim of this internship was to devise tools that can be embedded within the
Faust compiler. As this seems to be a tremendous task (to be efficient on LSB,
we should at least reimplement a consequent subset of MPFR in the compiler), we
we should at least re-implement a consequent subset of MPFR in the compiler), we
might consider an approach using external tools to annotate Faust programs
before feeding them to the compiler. With this we could interface
state-of-the-art tools to devise some parts of the Faust program. There is for
instance \cite{adje2021fastefficient}, which succeeds in doing automatic MSB and
LSB computations by describing them as variables on an Integer Linear Program
(ILP) and to prove that any solution on the relaxed problem is necessarily an
integer one using an ILP Meta-theorem. So after having extracted from a given
program an ILP problem, they just need to run on it any solver to get a
nearly-optimal solution. Their work was on a small toy imperative
turing-complete language, but it might be translated into Faust block-diagram
algebra.
We also mentionned twice the work in \cite{volkova2017reliableimplementation}.
state-of-the-art tools to devise some parts of the Faust program.
There is for instance \cite{adje2021fastefficient}, which does automatic MSB and
LSB computations by describing the MSB and LSB of each value as variables on an
Integer Linear Program (ILP). To solve these ILP, they prove that any solution
on the LP relaxation (the ILP without the integer constraint) of the problem is
an integer solution. So they just need to run on it any LP solver (such as the
simplex) to get a nearly-optimal solution. Their work was on a small toy
imperative Turing-complete language, but it might be translated into Faust
block-diagram algebra.
We also mentioned twice the work in \cite{volkova2017reliableimplementation}.
The algorithm implemented in this thesis is to rewrite the equation
\(y(t) = \sum_{k=0}^M b_kx(t-k) - \sum_{k=1}^Na_ky(t-k)\) as a matrix linear
relation coming from the theory of control systems (recall that
......@@ -112,25 +122,33 @@ characteristics 1. saves the user the need of computing filter coefficients
corresponding to the wanted characteristics and 2. gives one more degree of
freedom to the filter designer that allows to do more efficient accurate filters.
\subsubsection{Unexplored Possibilities}
\label{sec:unexpl-poss}
Although equality saturation was not the subject of this internship, it would be
interesting to implement it in the Faust compiler. As hard as it may be, Faust
already needed the complex tools used in Equality Saturation (namely the
Hash-consing algorithm\cite{kiselyov2011implementingexplicit}) for testing rooted
oriented graphs equivalences. As equality saturation works very well with
lattices, given an expression, one could compute the most interval-friendly
equivalent expression.
Although equality saturation\cite{willsey2021eggfast} was not the subject of
this internship, it would be interesting to implement it in the Faust
compiler. As hard as it may be, Faust already needed the complex tools used in
Equality Saturation (namely the Hash-consing
algorithm\cite{kiselyov2011implementingexplicit}) for testing rooted oriented
graphs equivalences. As equality saturation works very well with lattices, given
an expression, one could compute the most interval-friendly equivalent
expression.
Something we did not study is operator recycling. Indeed, in HLS, when an
Something we did not study is operator sharing. Indeed, in HLS, when an
operator, for instance a floating-point adder, must be used in two parts of a
circuit, the HLS may succed to use a single circuit to do both computations
circuit, the HLS may succeed to use a single circuit to do both computations
(with for instance pipeline). Yet, by designing one format per signal, we
completely lose the redundancy of operators (we will have an adder for inputs
in \(\sfix(0,-23)\), another for inputs in \(\sfix(0, -21)\), etc\dots). So,
even by computing less bits, we could use more silicon. This has been studied in
even by computing fewer bits, we could use more silicon. This has been studied in
\cite{menard2012highlevelsynthesis} using optimisation techniques. The idea is,
by increasing accuracy, computing useless bits but setting many operators at the
same format, in order to use one implementation for many operators.
Another interesting idea from this paper is the use of statistical error,
instead of our worst-case upperbounds. Provided a good statistical distribution
of rounding errors, this enables to consider infrequent extremal cases as rare
and acceptable.
\end{document}
%%% Local IspellDict: en
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
......@@ -8,18 +8,20 @@
\hline
ADC & Analog-to-Digital Converter\\
ASIC & Application-Specific Integrated Circuit \\
CCDE & Constant-Coefficients Difference Equation \\
DAC & Digital-to-Analog Converter\\
FAST & Fast Audio Signal processing Techniques on FPGA\\
FAUST & Functionnal AUdio STream\\
FIR & Finite Inpulse Response Filter\\
FAUST & Functional AUdio STream\\
% FIR & Finite Impulse Response Filter\\
FPGA & Field-Programmable Gate Array\\
HLS & High-Level Synthesis\\
IIR & Infinite Inpulse Response filter\\
% IIR & Infinite Impulse Response Filter\\
LSB & Least Significant Bit\\
LTI & Linear Time-Invariant \\
MSB & Most Significant Bit\\
RN & Round-to-Nearest \\
SOC & System On Chip\\
ulp & Unit in last position\\
ulp & Unit in last place\\
WCPG & Worst-Case Peak Gain\\
\hline
\end{tabular}
......@@ -28,25 +30,28 @@
\label{sec:notations}
\begin{tabular}[h]{|l|l|}
\hline
\(X, Y, Z\) & real intervals \\
\(S(t)\) & interval containing the signal \(s(t)\) for each time \(t\)\\
\hline
\(a,b,c,d\) & Real value \\
\(i, j, k\) & index \\
\(\lsb\) & Least Significative Bit (LSB), or an integer\\
\(\msb\) & Most Significative Bit (MSB), or an integer\\
\(n, p, q\) & Nonnegative integer \\
\(i, j, k\) & indices \\
\(\lsb\) & LSB position, or an integer\\
\(\msb\) & MSB position, or an integer\\
\(n, p, q\) & Non-negative integer \\
\(s(t)\) & A Signal \(s: \Rel \to \Real\) \\
\(w\) & A wordlength (the size in bits of a number)\\
\(w\) & A word-length (the size in bits of a number)\\
\(x(t)\) & Input signal \\
\(y(t)\) & Output signal \\
\(x,y,z\) & Real value \\
\hline
\(\abserr\) & Absolute error \\
\(\fq\) & Sampling frequency \\
\hline
\(\cpt{f}\) & The computed version (including roundings) of the function \(f\)\\
\(\rnd\) & a rounding operator (generally round-to-nearest) \\
\(\op\) & an operation \\
\hline
\end{tabular}
\end{document}
......
No preview for this file type
......@@ -3,11 +3,13 @@
\usepackage{geometry}
\geometry{left=20mm,right=20mm,top=10mm}
\geometry{left=20mm,right=20mm,top=10mm, bottom=8mm, heightrounded,
includefoot}
\usepackage[english]{babel}
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
%\usepackage{xcolor}
\usepackage{algorithm}
\usepackage{algpseudocode}
......@@ -15,13 +17,35 @@
\lstset{