Commit aa9bfbef authored by janlukas.bosse's avatar janlukas.bosse
Browse files

af asd as fd

parent c5f92544
This diff is collapsed.
This diff is collapsed.
# Advanced Computational Physics
Libraries and reference data needed for the Advanced Computational Physics Lab Course 2020
\ No newline at end of file
%!TEX root = report.tex
% ----------------------------------------------------------------------------
% Use the KomaScript styling on A4 paper
% ----------------------------------------------------------------------------
\documentclass[a4paper,11pt,parskip=half,toc=bib]{scrartcl}
\usepackage[pass]{geometry}
\usepackage{todonotes}
% ----------------------------------------------------------------------------
% Use UTF-8 encoding
% ----------------------------------------------------------------------------
\usepackage[utf8]{inputenc}
\usepackage[english]{babel}
\DeclareUnicodeCharacter{2212}{-}
% ----------------------------------------------------------------------------
% pretty fonts
% ----------------------------------------------------------------------------
%\usepackage{palatino}
\usepackage[sc]{mathpazo} % Use the Palatino font
\usepackage[T1]{fontenc} % Use 8-bit encoding that has 256 glyphs
\linespread{1.05} % Line spacing - Palatino needs more space between lines
\usepackage{microtype} % Slightly tweak font spacing for aesthetics
% ----------------------------------------------------------------------------
% Headings and spacing
% ----------------------------------------------------------------------------
\addtokomafont{disposition}{\rmfamily}
\usepackage[section]{placeins}
\makeatletter
\AtBeginDocument{%
\expandafter\renewcommand\expandafter\subsection\expandafter{%
\expandafter\@fb@secFB\subsection
}%
}
\makeatother
% ----------------------------------------------------------------------------
% physics and maths related things
% ----------------------------------------------------------------------------
\usepackage{physics}
\usepackage{amsmath, amsthm, amssymb, amsfonts}
\renewcommand{\vec}[1]{\boldsymbol{#1}} % bold vectors
% ----------------------------------------------------------------------------
% Figures etc
% ----------------------------------------------------------------------------
\usepackage[export]{adjustbox}
\usepackage{wrapfig}
\usepackage[format=plain,indention=1.5em,small,
labelfont=bf,up,textfont=it,up,skip=5pt]{caption}
\usepackage{subcaption}
% whack shit to get the pngs in the pgf to be included correctly
\usepackage{pgfplots}
\pgfplotsset{compat=1.16}
\usepackage{pgf}
% ----------------------------------------------------------------------------
% Citing and Referencing
% ----------------------------------------------------------------------------
\usepackage[backend=biber, style=phys, biblabel=brackets,
citestyle=numeric-comp, doi=false, sorting=none]{biblatex}
\usepackage[autostyle]{csquotes}
\addbibresource{literature.bib}
\usepackage[pdfstartview=FitH, % Oeffnen mit fit width
breaklinks=true, % Umbrueche in Links, nur bei pdflatex default
bookmarksopen=true, % aufgeklappte Bookmarks
bookmarksnumbered=true, % Kapitelnummerierung in bookmarks
colorlinks=true, urlcolor=blue, pdfborder={0 0 0},
pdfencoding=auto
]{hyperref}
\usepackage{cleveref}
\usepackage{nameref}
\numberwithin{equation}{section}
\numberwithin{table}{section}
\numberwithin{figure}{section}
% ----------------------------------------------------------------------------
% The title page
% ----------------------------------------------------------------------------
\title{Project Report --- Monte-Carlo Simulations for Particle Physics}
\author{Jan Lukas Bosse}
\date{\today}
\begin{document}
%\maketitle
\begin{titlepage}
\thispagestyle{empty}
\centering
\begin{minipage}[t]{.5\textwidth}
\includegraphics[height=1.5cm,left]{Logo_Uni_Goettingen}
\end{minipage}%
\begin{minipage}[t]{.5\textwidth}
\includegraphics[height=1.5cm,right]{Logo_Physik_links_eng}
\end{minipage}%
\vspace*{4.2cm}
\centering
\Large \textbf{Advanced Computational Physics Lab}
\vspace*{1cm}
%{\LARGE \bfseries
% \titlefont{Computational Many-Body Methods} \\[0.5cm]}
{\LARGE \bfseries
Project Report --- Monte-Carlo Simulations for Particle Physics \\[1.5cm]}
\normalfont\normalsize
\begin{center}
\textbf{\large Jan Lukas Bosse}\\[3mm]
\end{center}
\end{titlepage}
% ----------------------------------------------------------------------------
% And the main part
% ----------------------------------------------------------------------------
\newpage
%\rhead{}
\tableofcontents
\newpage
\setcounter{page}{1}
\section{Simulation of the \(e^+ e^- \to q \overline{q}\)
process at fixed-order QED}
In this first part we integrate the leading order matrix element for
the \(e^+ e^- \to q \overline{q}\) process using Monte-Carlo integration. The
probability for this process to happen at a given scattering angle \(\theta\),
center of mass energy \(s\) and azimuthal angle \(\phi\) is proportional
to the differential cross-section
%
\begin{equation}
f_{q\overline{q}}(s, \cos \theta, \phi) :=
\frac{\dd{\sigma}_{q\overline{q}}}{\dd{s} \dd{\cos \theta} \dd{\phi}}
= p(s) \frac{1}{8\pi} \frac{1}{4 \pi} \frac{1}{2s}
|\mathcal{M}_{q\overline{q}}(s, \cos \theta, \phi)|^2
\end{equation}
%
where \(p(s)\) is the distribution of the center of mass spectrum of the beam
and \(\mathcal{M}_{q\overline{q}}\) is the leading order matrix element
as given in the exercise sheet. To find the total cross-section
\(\sigma_{q\overline{q}}\) of the process
one has to integrate \(f_{q\overline{q}}\) over \(s, \theta\)
and \(\phi\). A closer look at \(\mathcal{M}_{q\overline{q}}\) shows that it
is not dependent on \(\phi\), so we can skip the \(\phi\)-integration
and replace it by multiplying the end-result with \(2 \pi\). Furthermore,
\(\cos \theta\) is distributed uniformly when integrating in spherical coordinates,
while \(\theta\) is not. So we will integrate over \(\cos \theta\) from
-1 to 1 instead of over \(\theta\). Besides \(s\) and \(\cos \theta\) the
integrand also depends on the quark flavour of the outgoing quark. Since
all outgoing flavours are equally likely and we integrate using Monte-Carlo
integration, we can simply pick a random flavor for each sample to achieve the
averaging over different quark flavors.
\begin{figure}[b]
\centering
\input{utils/integrand.pgf}
\caption{The integrands \(f_{q \overline{q}}(s, \cos \theta, 0)\) for all
possible outgoing quark flavours. Sadly, it is not easy to tell from this plot
which of the quark flavours corresponds to which of the surfaces. But the
general features are clear: Depending on the outgoing flavour \(q\) the
integrand takes one of two very similar forms. In either case
\(f_{q\overline{q}}\) has its highest values along the \(s=M_Z^2\) line.}
\label{fig:integrand}
\end{figure}
\todo{Plot the integrand here?}
\subsection{Integrating \(\dd{\sigma}_{q\overline{q}}\) at fixed \(s\)}
\label{sec:1d_integration}
First, we calculate \(\sigma_{q\overline{q}}\) for \(p(s) = \delta(s - M_Z^2)\),
where \(M_Z\) is the mass of the Z-boson. Now the \(s\)-dependence
of the integrand also drops out and we only have to perform a 1-dimensional
integral. The code for the Monte-Carlo integrator is found in
\texttt{integrator.MCIntegrator} and as a sampler we use a
\texttt{integrator.UniformSampler} in the range
\((\cos \theta, y) \in [-1,1] \times [0, 30]\) with \(N_{try} = 100,000\)
samples. After multiplication with
\(2\pi\) for the \(\phi\) integration and other conversion factors we find that
%
\begin{equation}
\sigma_{q\overline{q}}(s=M_Z^2) = 42510 \pm 200 \, \mathrm{pb}.
\end{equation}
\subsection{Integrating over \(\cos \theta\) and \(s\)}
Although the integrand has its largest values at the \(s=M_Z^2\) line, one
should also integrate over \(s\) to get the correct result for the
cross-section. So now we integrate of \(\cos \theta\) and \(s\), the latter
in the interval \([(M_Z - 3 \Gamma_Z)^2, (M_Z + 3\Gamma_Z)^2]\). Since
the integrand decays rapidly in \(s\), this range should suffice to
approximate the integral for \(s \in [0, \infty]\).
We use mutatis mutandis the same integrator and sampler as in the previous part.
Because we now perform a 2D integral instead of a 1D integral we use now
\(N_{try} = 10,000,000\) samples. As a result we now obtain
%
\begin{equation}
\sigma_{q\overline{q}} = 9920 \pm 30 \, \mathrm{pb}.
\end{equation}
%
Unsurprisingly, this result is smaller than in the previous section. In
the previous section we chose \(p(s) = \frac{1}{2} \delta(s-M_Z^2)\), which is
supported exactly along the ridge of highest values of the integrand,
while in this section we had
\(p(s) = N \chi_{[(M_Z - 3 \Gamma_Z)^2, (M_Z + 3\Gamma_Z)^2]}(s)\), where
\(\chi_{[a,b]}(x)\) denotes the characteristic function on the interval \([a,b]\)
and \(N\) is a normalization constant.
Hence we sampled a lot more small function values than in the previous section
and one expects the integral to be smaller.
\begin{figure}
\centering
\input{utils/histograms.pgf}
\caption{Comparison of the acceptance probability as a function of \(s\)
with the marginal integral
\(\int f_{q\overline{q}}(s, \cos \theta, 0) \dd{\cos \theta}\). For comparison
we also show a Breit-Wigner distribution centred at \(M_Z^2\) with scale
\(M_Z^2 \Gamma_Z^2\). All three
distributions are normalized such that the integral over them is 1.
}
\label{fig:histograms}
\end{figure}
To check consistency with the 1D integration from the previous section one can
also compute the marginal 1D integrals
\(\int f_{q\overline{q}}(s, \cos \theta, 0) \dd{\cos \theta}\) for
sufficiently closely spaced \(s\)-values and then compare their values with the
acceptance probabilities \(p_{acc}(s)\) of sample points as a function of their \(s\)-value
in the 2D integration. This is done in \Cref{fig:histograms}.
For comparison we also show the Breit-Wigner distribution that we will
use later for importance sampling. We see that both functions agree
qualitatively in their overall shape. But \(p_{acc}(s)\) is more peaked than
the marginal integrals; more than could be explained by statistical fluctuations.
This hints that there must still be some mistake in the code. See
\cref{sec:discussion} for more details.
\subsection{Monte-Carlo errors and number of samples}
One of the advantages of Monte-Carlo integration over e.g. quadrature rules is
that the error of the results scales as \(O(N_{try}^{-\frac{1}{2}})\) irrespective
of the dimension of the integration domain while for said quadrature rule the
necessary number of function evaluations scales exponentially in the dimension
of the integration domain\footnote{I know, apples and oranges... Still, this is
what people claim as the advantage of Monte Carlo integration.}.
We verify this claim by plotting the Monte-Carlo error estimate as a function of
the number of samples \(N_{try}\) in \Cref{fig:monte_carlo_errors}. Unsurprisingly, it
follows the \(\frac{1}{\sqrt{N}}\) prediction almost perfectly.
\begin{figure}
\centering
\input{utils/monte_carlo_errors.pgf}
\caption{Monte-Carlo error estimate for the 1D integral in
\cref{sec:1d_integration} (left) and for the 2D integral (right) as a function
of samples \(N_{try}\). For better comparison we also plot a
\(\frac{1}{\sqrt{N_{try}}}\) line in both cases.}
\label{fig:monte_carlo_errors}
\end{figure}
\subsection{Comparison of Results}
\section{Discussion}
\label{sec:discussion}
\begin{figure}
\centering
\input{utils/3dplot.pgf}
\caption{Oh no!}
\end{figure}
\printbibliography
\end{document}
from vector import Vec4
from particle import Particle
from histogram import Histo1D, Scatter2D
import math as m
from utils.vector import Vec4
from utils.particle import Particle
from utils.histogram import Histo1D, Scatter2D
from intertools import combinations
class Algorithm:
    """Durham jet-clustering algorithm for e+e- events.

    NOTE(review): this chunk contained duplicated old/new method
    signatures from a diff (two consecutive ``def Yij``/``def Cluster``
    lines, which is a syntax error); they are merged into one
    definition each below. ``events`` was also used where ``event``
    was meant, and the distance table was keyed by indices but looked
    up with Particle objects.
    """

    def Yij(self, p, q):
        """Durham distance y_ij = 2 min(E_i, E_j)^2 (1 - cos theta_ij) / E_cm^2.

        Arguments
        ---------
        p, q : Vec4 four-momenta of the two particles.
        """
        pq = p.px * q.px + p.py * q.py + p.pz * q.pz
        # clamp cos(theta_ij) to [-1, 1] to guard against rounding errors
        costheta = min(max(pq / m.sqrt(p.P2() * q.P2()), -1.0), 1.0)
        return 2. * min(p.E, q.E)**2 * (1.0 - costheta) / self.ecm2

    def Cluster(self, event):
        """Cluster the final-state particles of `event`.

        Arguments
        ---------
        event : list of Particle; entries 0 and 1 are the incoming beams
                and are excluded from the clustering.
        """
        # local import: the top-of-file import is misspelled ("intertools")
        from itertools import combinations

        while (N := len(event) - 2) > 1:
            # pairwise Durham distances between final-state particles
            # (indices offset by 2 to skip the two beam particles)
            yij = {}
            for i, j in combinations(range(2, len(event)), 2):
                yij[(i, j)] = self.Yij(event[i].mom, event[j].mom)
            # find the closest pair below the cut.
            # NOTE(review): `ycut` is not defined in this chunk --
            # presumably a module-level resolution parameter; confirm
            # against the full file.
            dist = ycut
            pair = None
            for key, d in yij.items():
                if d < dist:
                    dist, pair = d, key
            if pair is None:
                return event
            # TODO: implement the clustering algorithm here, and return a list of
            # splitting scales Yij, ordered from smallest Yij to largest Yij
......
import constants as const
import numpy as np
def chi1(s):
    """Interference correction factor chi_1(s) for the photon--Z
    interference term of the ee -> qq matrix element."""
    denominator = (s - const.M_Z**2)**2 + const.Gamma_Z**2 * const.M_Z**2
    return const.kappa * s * (s - const.M_Z**2) / denominator
def chi2(s):
    """Correction factor chi_2(s) for the pure Z-exchange term of the
    ee -> qq matrix element."""
    denominator = (s - const.M_Z**2)**2 + const.Gamma_Z**2 * const.M_Z**2
    return const.kappa**2 * s**2 / denominator
def eeqq_matrix_element(s : float, costheta : float, phi : float, q):
    """The squared leading-order ee -> qq matrix element.

    Arguments:
    ----------
    s : The centre of mass energy
    costheta : The angle between the incoming e+ and the outgoing q
    phi : The azimuthal angle of outgoing quark in the plane perpendicular
          to the beam (the matrix element does not actually depend on it)
    q : one of ["u", "c", "d", "s", "b"], the flavour of the outgoing quarks
    """
    # shorthands for the electroweak couplings of the electron and quark q
    q_e, q_q = const.Q["e"], const.Q[q]
    v_e, v_q = const.V["e"], const.V[q]
    a_e, a_q = const.A["e"], const.A[q]
    c1, c2 = chi1(s), chi2(s)
    # part symmetric under costheta -> -costheta
    symmetric = (1 + costheta**2) * (
        q_e**2 * q_q**2
        + 2 * q_e * q_q * v_e * v_q * c1
        + (a_e**2 + v_e**2) * (a_q**2 + v_q**2) * c2
    )
    # forward-backward asymmetric part, linear in costheta
    # (np.copy keeps array inputs from being aliased)
    asymmetric = np.copy(costheta) * (
        4. * q_e * q_q * a_e * a_q * c1
        + 8. * a_e * v_e * a_q * v_q * c2
    )
    return (4 * np.pi * const.alpha)**2 * const.N_C * (symmetric + asymmetric)
def eeqq_mel_random_q(s, costheta, phi):
    """Evaluate `eeqq_matrix_element` with a uniformly drawn quark flavour.

    The flavour average needed for the total cross-section then happens
    automatically when this is sampled many times during Monte-Carlo
    integration.
    """
    flavour = np.random.choice(const.out_flavours)
    return eeqq_matrix_element(s, costheta, phi, flavour)
\ No newline at end of file
import sys
import matplotlib.pyplot as plt
import numpy as np
class Bin1D:
......@@ -121,8 +122,10 @@ class Histo1D:
def Plot(self, ax=None, *plt_args, **plt_kwargs):
    """Draw the histogram as a normalized step plot.

    Bin contents are divided by their individual bin widths (so unequal
    bins are handled correctly) and the curve is rescaled so that its
    integral is approximately 1.

    Arguments
    ---------
    ax : matplotlib axes to draw on; a new figure is created if None.
    plt_args, plt_kwargs : forwarded to `ax.step`.
    """
    edges = [b.xmin for b in self.bins]
    edges.append(self.bins[-1].xmax)
    # per-bin density: divide by each bin's own width, not the first bin's
    # (the diff residue here assigned `heights` twice; merged to one line)
    heights = np.array([b.w / (b.xmax - b.xmin) for b in self.bins])
    # repeat the last height so the final step is drawn by where='post'
    heights = np.append(heights, heights[-1])
    # normalize: sum(heights) * mean bin width approximates the integral.
    # np.array is required -- `/=` on a plain Python list raises TypeError.
    heights /= sum(heights) * (edges[-1] - edges[0]) / (len(edges) - 1)
    if ax is None:
        fig, ax = plt.subplots()
    ax.step(edges, heights, *plt_args, where='post', **plt_kwargs)
......
import constants as const
from typing import Callable, List, Tuple
from uncertainties import ufloat
import uncertainties.unumpy as unp
import numpy as np
from histogram import Histo1D
class UniformSampler(object):
    def __init__(self, domain, codomain):
        """A sampler producing uniform points over a rectangular box.

        Arguments
        ---------
        domain: The sampling domain as a list of tuples [(x0, x1), (y0, y1), ...]
        codomain: The sampling codomain as a tuple. Should satisfy
                  codomain[0] < min(func), max(func) < codomain[1]
        """
        self.domain = domain
        self.codomain = codomain
        # volume of the x-box alone ...
        self.domain_volume = 1
        for lo, hi in domain:
            self.domain_volume *= hi - lo
        # ... and of the full box including the function-value direction
        self.volume = self.domain_volume * (codomain[1] - codomain[0])

    def __call__(self):
        """Return one sample (x, y) uniformly distributed in `domain` x `codomain`."""
        raw = np.random.uniform(size=(len(self.domain)))
        # rescale each unit-interval variate to its coordinate range
        x = np.array([lo + r * (hi - lo)
                      for r, (lo, hi) in zip(raw, self.domain)])
        lo_y, hi_y = self.codomain
        return x, np.random.uniform(lo_y, hi_y)
class BreitWignerSampler(object):
    def __init__(self, prefactor):
        """A sampler drawing `s` from a Breit-Wigner distribution.

        Returns samples `((s, costheta), y)` where `s` follows a
        Breit-Wigner distribution centred at M_Z^2, `costheta` is uniform
        on [-1, 1] and `0 < y < prefactor * breit_wigner(s)`.

        Arguments
        ---------
        prefactor: The 'height' of the distribution. Should be chosen as
                   low as possible while the envelope still lies above
                   the integrand everywhere.
        """
        self.prefactor = prefactor
        # the costheta interval [-1, 1] has length 2, hence the factor 2
        self.volume = 2 * prefactor
        self.codomain = (0, prefactor / (np.pi * const.M_Z * const.Gamma_Z))
        self.domain_volume = 2

    def __call__(self):
        """See self.__init__()"""
        # inverse-CDF transform: map a uniform variate through tan()
        u = np.random.uniform()
        s = const.Gamma_Z * const.M_Z * np.tan(np.pi * (u - 0.5)) + const.M_Z**2
        costheta = np.random.uniform(-1, 1)
        # envelope height at this s: prefactor times the BW density
        ymax = (self.prefactor * const.Gamma_Z * const.M_Z
                / (np.pi * ((s - const.M_Z**2)**2
                            + const.M_Z**2 * const.Gamma_Z**2)))
        y = np.random.uniform(0, ymax)
        return np.array([s, costheta]), y
class BreitWignerSampler3D(object):
    def __init__(self, prefactor):
        """A 3D sampler drawing `s` from a Breit-Wigner distribution.

        Returns samples `((s, costheta, phi), pid, color, y)` where `s`
        follows a Breit-Wigner distribution centred at M_Z^2, `costheta`
        and `phi` are uniform, and `0 < y < prefactor * breit_wigner(s)`.

        Arguments
        ---------
        prefactor: The 'height' of the distribution. Should be chosen as
                   low as possible while the envelope still lies above
                   the integrand everywhere.
        """
        self.prefactor = prefactor
        # 2 (costheta range) times 2*pi (phi range) = 4*pi
        self.volume = 4 * np.pi * prefactor
        self.codomain = (0, prefactor / (np.pi * const.M_Z * const.Gamma_Z))
        self.domain_volume = 2

    def __call__(self):
        """See self.__init__()"""
        # inverse-CDF transform: map a uniform variate through tan()
        u = np.random.uniform()
        s = const.Gamma_Z * const.M_Z * np.tan(np.pi * (u - 0.5)) + const.M_Z**2
        costheta = np.random.uniform(-1, 1)
        phi = np.random.uniform(0, 2 * np.pi)
        # NOTE(review): randint(1, 5) draws pid in {1, ..., 4}, i.e. only
        # four flavours; confirm whether 5 flavours (b quark) was intended.
        pid = np.random.randint(1, 5)
        color = np.random.randint(1, 3)
        # envelope height at this s: prefactor times the BW density
        ymax = (self.prefactor * const.Gamma_Z * const.M_Z
                / (np.pi * ((s - const.M_Z**2)**2
                            + const.M_Z**2 * const.Gamma_Z**2)))
        y = np.random.uniform(0, ymax)
        return np.array([s, costheta, phi]), pid, color, y
class CauchySampler(object):
    def __init__(self, prefactor):
        """
        Same as the BreitWignerSampler, but drawing `x` from a standard
        Cauchy distribution instead, with `0 < y < prefactor * cauchy(x)`.
        """
        self.prefactor = prefactor
        self.volume = prefactor
        self.codomain = (0, prefactor)
        self.domain_volume = 1

    def __call__(self):
        # inverse-CDF transform of a uniform variate gives a Cauchy variate
        u = np.random.uniform()
        x = np.tan(np.pi * (u - 0.5))
        # envelope height at this x: prefactor times the Cauchy density
        ymax = self.prefactor / (np.pi * (x**2 + 1))
        return x, np.random.uniform(0, ymax)
class MCIntegrator(object):
    def __init__(self, func: Callable, sampler,
                 histograms: bool = False, histbins: int = 20):
        """A hit-or-miss Monte-Carlo integrator.

        Arguments:
        ----------
        func : The function that we want to integrate.
        sampler : A sampling callable returning pairs (x, y) lying around
                  the integration region; must expose `volume`,
                  `domain_volume` and `codomain` attributes (and `domain`
                  if histograms=True).
        histograms : Record histograms of the accepted (x, y)?
        histbins : Number of bins in the histograms. Has no effect, if
                   `histograms=False`
        """
        self.func = func
        self.sampler = sampler
        self.N_acc = 0
        self.N_try = 0
        self.I = ufloat(0, 0)
        self.histograms = []
        if histograms:
            # one histogram per integration variable
            for i, (lo, hi) in enumerate(self.sampler.domain):
                self.histograms.append(
                    Histo1D(histbins, lo, hi, name=f"x{i}"))

    @classmethod
    def uniform(cls, func: Callable, domain: List[Tuple], codomain: Tuple,
                *args, **kwargs):
        "A Monte-Carlo integrator on a rectangular domain. (<=> uniform sampling)."
        sampler = UniformSampler(domain, codomain)
        return cls(func, sampler, *args, **kwargs)

    def __call__(self, N_try):
        "Integrate using N_try samples and return the estimate of the integral"
        for _ in range(N_try):
            x, y = self.sampler()
            # hit-or-miss: accept the point if it lies below the function
            if (f := self.func(x)) > y:
                self.N_acc += 1
                for i, hist in enumerate(self.histograms):
                    hist.Fill(x[i], f)
        self.N_try += N_try
        p_succ = self.N_acc / self.N_try
        # binomial error estimate on the acceptance probability,
        # scaled by the sampling volume
        self.I = ufloat(self.sampler.volume * p_succ,
                        self.sampler.volume
                        * np.sqrt(p_succ * (1 - p_succ) / self.N_try))
        # account for a sampling box whose y-range does not start at 0
        self.I += self.sampler.domain_volume * self.sampler.codomain[0]
        return self.I

    def __repr__(self):
        return str(self.I)

    def reset(self):
        "Set N_try and N_acc to zero again"
        # NOTE(review): recorded histograms are NOT cleared here --
        # presumably intentional; confirm if reuse after reset matters.
        self.N_acc = self.N_try = 0
        self.I = ufloat(0, 0)

    def samples(self, N_samples):
        "Get some samples of the function for debugging purposes"
        samples = np.empty(N_samples)
        for i in range(N_samples):
            # bug fix: the sampler returns an (x, y) pair; only the
            # x-coordinate must be passed to func (previously the whole
            # tuple was handed over).
            x, _ = self.sampler()
            samples[i] = self.func(x)
        return samples
\ No newline at end of file
import math as m
from utils.vector import Vec4
from vector import Vec4
class Particle:
def __init__(self, pdgid, momentum, col=[0, 0]):
"""
Arguments
--------
pdgid: The flavour of the particle as an integer
1-5: quarks
21: gluon