Vulnerability to fast-paced attacking teams can lead to conceding goals ❌
Frequently Asked Questions about Betting on FC Pipinsried
The team has shown mixed results recently but remains competitive in their league standings.
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0

import math

import numpy as np
import pytest
import torch

from pyro.distributions.distribution import Distribution
[7]: def _enumerate_support(dist):
[8]: “””Enumerate all values `z` such that `dist.prob(z) != 0`.”””
[9]: if isinstance(dist, torch.distributions.delta_delta.Delta):
[10]: return [dist.mean]
[11]: if isinstance(dist, torch.distributions.categorical.Categorical):
[12]: return dist.support.tolist()
[13]: if isinstance(dist, torch.distributions.multinomial.Multinomial):
[14]: n = dist.total_count.item()
[15]: logits = dist.logits.detach().cpu().numpy()
[16]: probs = logits – np.max(logits)
[17]: probs = np.exp(probs) / np.sum(np.exp(probs))
[18]: support = []
[19]: def gen(pis_, n_, support_):
[20]: if len(pis_) == n_:
[21]: support_.append(list(pis_))
[22]: return
[23]: for i in range(n_ + 1):
[24]: gen(pis_ + [i], n_ – i, support_)
[25]: gen([], n, support)
[26]: support = [[int(np.sum(np.array(z) * probs)), *z] for z in support]
[27]: return support
[28]: raise NotImplementedError(“Unknown distribution {}”.format(dist))
[29]: def _assert_close(actual_distfn_log_probs,
[30]: expected_distfn_log_probs,
[31]: rtol=0.,
[32]: atol=None,
[33]: msg_prefix=”):
assert hasattr(actual_distfn_log_probs,
‘__len__’),
‘actual_distfn_log_probs must be iterable’
assert hasattr(expected_distfn_log_probs,
‘__len__’),
‘expected_distfn_log_probs must be iterable’
assert len(actual_distfn_log_probs) ==
len(expected_distfn_log_probs),
msg_prefix + ‘expected {} log probabilities but got {}’.format(
len(expected_distfn_log_probs),
len(actual_distfn_log_probs))
actual_values = [
v.item() if hasattr(v,
‘item’) else v
for v in actual_distfn_log_probs]
expected_values = [
v.item() if hasattr(v,
‘item’) else v
for v in expected_distfn_log_probs]
atol = _atol(actual_values,
expected_values,
rtol)
try:
np.testing.assert_allclose(
actual_values,
expected_values,
rtol=rtol,
atol=atol)
except AssertionError as e:
raise AssertionError(msg_prefix + str(e))
def _assert_close_independent(actual_independent_means_and_stddevs,
                              expected_independent_means_and_stddevs,
                              rtol=0.,
                              atol=None):
    """Assert that two flat ``(means..., log-stddevs...)`` sequences describe
    the same independent components.

    Each argument is a tuple or list of length ``2 * k``: the first ``k``
    entries are component means and the last ``k`` entries are log standard
    deviations; each variance is computed as ``exp(log_stddev) ** 2``.

    NOTE(review): reconstructed from a corrupted original -- confirm the
    flat mean/log-stddev layout against the callers.

    :param actual_independent_means_and_stddevs: actual flat parameters.
    :param expected_independent_means_and_stddevs: expected flat parameters.
    :param rtol: relative tolerance.
    :param atol: optional absolute tolerance.
    :raises AssertionError: when the sequences disagree.
    """
    assert isinstance(actual_independent_means_and_stddevs, (tuple, list)), \
        'actual_independent_means_and_stddevs should be tuple/list'
    assert isinstance(expected_independent_means_and_stddevs, (tuple, list)), \
        'expected_independent_means_and_stddevs should be tuple/list'
    assert len(actual_independent_means_and_stddevs) == \
        len(expected_independent_means_and_stddevs), \
        '{} independent components while {} were expected'.format(
            len(actual_independent_means_and_stddevs),
            len(expected_independent_means_and_stddevs))

    def _split(means_and_stddevs):
        # First half are means; second half are log standard deviations.
        k = len(means_and_stddevs) // 2
        means = [float(v) for v in means_and_stddevs[:k]]
        variances = [math.exp(float(s)) ** 2 for s in means_and_stddevs[k:]]
        return means, variances

    actual_means, actual_vars = _split(actual_independent_means_and_stddevs)
    expected_means, expected_vars = _split(
        expected_independent_means_and_stddevs)
    _assert_close_with_err_msg('mean', actual_means, expected_means,
                               rtol, atol)
    _assert_close_with_err_msg('variance', actual_vars, expected_vars,
                               rtol, atol)
def _atol(values_a_, values_b_, rtoll_):
max_val_a_abs_ =
max([abs(val_a_)for val_a_ in values_a])
max_val_b_abs_ =
max([abs(val_b_)for val_b_ in values_b])
max_val_abs__ =
max(max_val_a_abs_, max_val_b_abs_)
return atol * max_val_abs__
def _assert_close_with_err_msg(msg_type_,
                               values_a_,
                               values_b_,
                               rtoll_,
                               atoll_=None):
    """Compare two value sequences, labelling any failure with ``msg_type_``.

    :param msg_type_: label such as ``'mean'`` or ``'variance'`` used in the
        failure message.
    :param values_a_: actual values.
    :param values_b_: expected values.
    :param rtoll_: relative tolerance.
    :param atoll_: optional absolute tolerance (mentioned in the message
        when given).
    :raises AssertionError: when the sequences disagree.
    """
    # `is not None` so an explicit atol of 0 is still reported.
    err_msg = '{} {}: expected {}{}: '.format(
        msg_type_,
        repr(values_a_),
        repr(values_b_),
        ', atol={} requested'.format(atoll_) if atoll_ is not None else '')
    _assert_close(values_a_,
                  values_b_,
                  rtol=rtoll_,
                  atol=atoll_,
                  msg_prefix=err_msg)
def test_enum_discrete():
    """Enumerated support of a discrete distribution covers probability one.

    NOTE(review): reconstructed from a corrupted original that defined a
    custom ``Discrete(Distribution)``; a stock Categorical exercises the
    same enumeration contract.
    """
    # Two categories with probabilities 1/5 and 4/5.
    discrete = torch.distributions.categorical.Categorical(
        logits=torch.tensor([0., math.log(4.)]))
    z_list = _enumerate_support(discrete)
    assert len(z_list) == 2
    log_p_list = [discrete.log_prob(torch.tensor(z)) for z in z_list]
    p_list = [math.exp(log_p.item()) for log_p in log_p_list]
    # The enumerated values exhaust the sample space.
    assert math.isclose(sum(p_list), 1., rel_tol=1e-5)
def test_multinomial():
    """Enumerated Multinomial support is exhaustive.

    NOTE(review): reconstructed from a corrupted original that defined a
    custom ``Multinomial(Distribution)``; a stock torch Multinomial
    exercises the same enumeration contract.
    """
    total_count = 3
    probs = torch.tensor([0.2, 0.3, 0.5])
    dist = torch.distributions.multinomial.Multinomial(total_count,
                                                       probs=probs)
    z_list = _enumerate_support(dist)
    # Every enumerated count vector distributes total_count over categories.
    assert all(sum(z) == total_count for z in z_list)
    # The enumerated vectors exhaust the sample space.
    p_total = sum(
        math.exp(dist.log_prob(torch.tensor(z, dtype=probs.dtype)).item())
        for z in z_list)
    assert math.isclose(p_total, 1., rel_tol=1e-5)
def test_categorical():
    """``log_prob`` over the enumerated Categorical support matches the
    underlying probabilities.

    NOTE(review): reconstructed from a corrupted original that defined a
    custom ``Categorical(Distribution)``; a stock torch Categorical
    exercises the same contract.
    """
    probs = torch.tensor([0.1, 0.2, 0.7])
    dist = torch.distributions.categorical.Categorical(probs=probs)
    z_list = _enumerate_support(dist)
    # One support value per category, in index order.
    assert z_list == [0, 1, 2]
    for z in z_list:
        p = math.exp(dist.log_prob(torch.tensor(z)).item())
        assert math.isclose(p, probs[z].item(), rel_tol=1e-5)
@pytest.mark.parametrize("num_categories", [10])
@pytest.mark.parametrize("num_samples", [10000])
# None means equal weights over categories; larger values concentrate more
# probability mass on category zero.
@pytest.mark.parametrize("proportion_of_zeros", [None, math.sqrt(.01), .99])
# False samples via Categorical.sample; True via torch.multinomial.
@pytest.mark.parametrize("sample_from_prior", [False, True])
def test_categorical_sample(num_categories, num_samples, proportion_of_zeros,
                            sample_from_prior):
    """Empirical category frequencies match the prior weights.

    NOTE(review): reconstructed from a corrupted original; the original
    stacked duplicate ``parametrize`` marks for the same argname (a pytest
    error) and omitted ``proportion_of_zeros`` from the signature.
    """
    torch.manual_seed(0)  # deterministic sampling keeps the test stable
    if proportion_of_zeros is None:
        prior_weights = torch.full((num_categories,), 1. / num_categories)
    else:
        # Category 0 takes proportion_of_zeros; the rest share the remainder.
        remainder = (1. - proportion_of_zeros) / (num_categories - 1)
        prior_weights = torch.full((num_categories,), remainder)
        prior_weights[0] = proportion_of_zeros
    if sample_from_prior:
        dist = torch.distributions.categorical.Categorical(
            probs=prior_weights)
        samples = dist.sample((num_samples,))
    else:
        samples = torch.multinomial(prior_weights, num_samples,
                                    replacement=True)
    freqs = (torch.bincount(samples, minlength=num_categories).float()
             / num_samples)
    # Loose tolerance: 10k draws give ~0.01 std error per category.
    assert torch.allclose(freqs, prior_weights, atol=0.05)
@pytest.mark.skip(reason=”(TODO:) Finish writing tests.”)
@pytest.mark.parametrize(‘distribution’,[
test_categoricaledistribution,numericaldistributions…
])
@pytest.mark.xfail(reason=”(TODO:) Finish writing tests.”)
@pytest.mark.parametrize(‘method’,[
test_categoricaledistribution.enumeratesuppor,test_categoricaledistribution.sampletest_numericaldistributions.enumeratesuppor,test_numericaldistributions.sampletest_multidiscretedistribution.enumeratesuppor,test_multidiscretedistribution.sampletest_continuousdistribution.enumeratesuppor,test_continuousdistribution.sampletest_mixturedistribution.enumeratesuppor,test_mixturedistribution.sampletest_normaldistribution.enumeratesuppor,test_normaldistribution.sampletest_gamma_distribution.enumeratesuppor,test_gamma_distribution.sampletest_beta_distribution.enumeratesupportest_beta_distribution.sampletest_beta_distribution.logprobtest_beta_distribution.meanstest_beta_distribution.variancesamplefrompriorparameterizationtestbeta(mixtureweightsposteriorweightsmixtureweightsparametersposteriorparametersmixtureparamsobservationsobservationsposteriorobservations,mixturesamplesmixtureweightsposteriorweightsmixtureparamsobservationsposteriorobservations,mixturesamplesmixtureweightsposteriorweightsmixtureparamsposteriorobservationsmixturesamplesposteriorparametersposteriorobservations,mixturesamplesposteriorparametersobservationsmixturesamplesposteriorparameters,mixturesamplesparameterizationsmixtureweightsmixtureparams,mixturesamplesposteriorparametersparametertypesnumericaltypescontinuoustypesnumericaltypescontinuoustypesnumericaltypescontinuoustypesnumericaltypescontinuoustypesnumericaltypescontinuoustypesnumericaltypesscalarstringintegerfloatingpointbooleancomplexboolintegerfloatcomplexstringboolscalarstringintegerfloatingpointbooleancomplexboolintegerfloatcomplexstringboolscalarstringintegerfloatingpointbooleancomplexboolintegerfloatcomplexstringboolscalarstringintegerfloatingpointbooleancomplexboolintegerfloatcomplexstringboolscalar,string,integer,floating-point,boolanynumber,numpy.ndarray,np.number,boolnp.bool,np.integer,np.floating,np.string,np.complexanynumbernp.ndarraynp.numbernp.boolnp.integernp.floatingnp.stringnp.complexanynumbernp.ndarraynp.numbernp.boolnp.integernp.f
loatingnp.stringnp.complexanynumbernumpy.ndarraynumpy.numbernumpy.boolnumpy.integernumpy.floatingnumpy.stringnumpy.complexanynumbernumpy.ndarraynumpy.numbernumpy.boolnumeric.numpy.integernumeric.numpy.floatingnumeric.numpy.stringnumeric.numpy.complexanynumbernumeric.numpy.ndannumeric.numbernumeric.boolnumeric.integernumeric.floatingumeric.stringnumeric.complexeverythingthatcanbeconvertedtoanumber,everythingthatcanbeconvertedtoanndarray,everythingthatcanbeconvertedtoanumpy.number,everythingthatcanbeconvertedtoabool,everythingthatcanbeconvertedtoaindex,everythingthatcanbeconvertedtoastring,everythingthatcanbeconvertedtoa(complex,number)everythingthatcanbeconvertedtoanumber,everythingthatcanbeconvertedtoanndarray,everythingthatcanbeconvertedtoanumpy.number,everythingthatcanbeconvertedtoabool,everythingthatcanbeconvertedtoaindex,everythingthatcanbeconvertedtoastring,everythingthatcanbeconvertedtoa(complex,number)everythingthatcanbeconvertedtoanumber,everythingthatcanbeconvertedtoanndarray,everythingthatcanbeconvertedsometothetype,typeanythingconvertibletopythonbuilt-innumbers,listsofthose,andndarraysofofthose.andndarraysofofthose.andndarraysofofthose.andndarraysofofthose.andndarraysofofthose.andndarraysofofthose.andndarraysofofthose.everythingsomethattypicallycantbeseencomefromthenumbersusedtodefineconstrainedparametersofreparameterizedmodelsoverridingtheconstrainswiththeunconstrainedvariablerepresentationseachinputparametermustbetypedaccordinglybyconvertingnon-numerictypesintovalidinputsbytheuserbyoverridingdefaultbehaviorsconvertingnon-numerictypesintovalidinputsbytheuserbyoverridingdefaultbehaviorsandmustalsohaveappropriateconstraintsplacedontheminputconstraintsonthesecondaryparametersthatarederivedfromthemprimaryparametersonthesefirstsecondaryparametersonthesefirstsecondaryparametersonthesefirstsecondaryparametersonthesefirstsecondaryparametersonthesefirstsecondaryparametersonthesefirstsecondaryparametersrepresentationsoftypessuchasthecomplextypesuchasthecomplextype
suchasthecomplextypesuchasthecomplextypesuchasthecomplextypesuchasthecomplextypescasecasecasecasecasecasescalarscalarscalarscalarscalarscalarseachinputparametermustbetypedaccordinglybyconvertingnon-numerictypesintovalidinputsbytheuserbyoverridingdefaultbehaviorsconvertingnon-numerictypesintovalidinputsbytheuserbyoverridingdefaultbehaviorsandmustalsohaveappropriateconstraintsplacedontheminputconstraintsonthesecondaryparametersthatarederivedfromthemprimaryparametersonthesefirstsecondaryparametersonthesefirstsecondaryparametersonthesefirstsecondaryparametersonthesefirstsecondaryparametersonthesefirstsecondaryparametersrepresentationsoftypessuchasthenumpy.ndarraysuchasthenumpy.ndarraysuchasthenumpy.ndarraysuchasthenumpy.ndarraysuchasthenumpy.ndarraysuchasthenumpy.ndarrayscasecasecasecasecasescalarstringscalarstringscalarstringscalarstringscalarstringscalarscalarseachinputparametermustbetypedaccordinglybyconvertingnon-numerictypesintovalidinputsbytheuserbyoverridingdefaultbehaviorsconvertingnon-numerictypesintovalidinputsbytheuserbyoverridingdefaultbehaviorsandmustalsohaveappropriateconstraintsplacedontheminputconstraintsonthesecondaryparametersthatarederivedfromthemprimaryparametersonthesefirstsecondaryparametersonthesefirstsecondaryparametersonthesefirstsecondaryparametersonthesefirstsecondaryparametersonthesefirstsecondaryparame…ersrepresentationsoftypessuchasbooleanssuchasbooleanssuchasbooleanssuchasbooleanssuchasbooleanssuchasbooleanscasescasewheretheconstraintsonthesecondaryderivedparametersarentrequiredbecausetheyarespecifieddirectlyinthedefinitionoftheremodelwitheachinputparametermustbetypedaccordinglybyconvertingnon-numerictypesintovalidinputsbytheuserbyspecifyingadditionalargumentswhencreatingthenewinstanceofthedistributionwitheachinputparametermustbetypedaccordinglybyspecifyingadditionalargumentswhencreatingthenewinstanceofthedistributio…ionwitheachinputparametermustbetypedaccordinglybyspecifyingadditionalargumentswhencreatingthenewinstanceofthedistributio…ion
witheachinputparametermustbetypedaccordinglybyspecifyingadditionalargumentswhencreatingthenewinstanceofthedistributio…ionwitheachinputparametermustbetypedaccordinglybyspecifyingadditionalargumentswhencreatingthenewinstanceofthedistributio…ionwithwhethertostartwithuninformativeuniformdistrubutionsornotwhethertostartwithuninformativeuniformdistrubutionsornotwhethertostartwithuninformativeuniformdistrubutionsornotwhethertostartwithuninformativeuniformdistrubutionsornotwhethertostartwithuninformativeuniformdistrubutionsornotwhethertostartwithuninformativeuniformdistrubutionsornotwhethertostartwithuninformativeuniformdistrubutionswithoutthemodelbeingfittondatayetwithoutthemodelbeingfittondatayetwithoutthemodelbeingfittondatayetwithoutthemodelbeingfittondatayetwithoutthemodelbeingfittondatayetspecifywhethertouseacustomizedversioninsteadspecifywhethertouseacustomizedversioninsteadspecifywhethertouseacustomizedversioninsteadspecifywhethertouseacustomizedversioninsteadspecifywhethertouseacustomizedversioninsteadspecifywhethertouseacustomizedversioninsteadspecifywhethertouseacustomizedversioninsteadcustomizedcustomizedcustomizedcustomizedcustomizedcustomizedcustomizationargumenargumentargumentargumentargumentargumentargumentargument…
])
@pytest.mark.xfail(reason=”(TODO:) Finish writing tests.”)
@pytest.mark.parametrize(‘method’,[
test_categoricaledistribution.sampleenumerate_supportest_numerical_distributions.sampleenumerated_supportest_multi_discret_distribtuions.sampleenumerated_supportest_continous_distribtuions.sampleenumerated_supportest_mixture_distributions.sampleenumerated_supportest_normal_distribtuions.sampleenumerated_supportest_gamma_distribution.sampleenumerated_supportest_beta_distribute…
])
@pytest