import itertools
import torch
import torch.nn.functional as F
import numpy as np
from scipy.stats import entropy
from collections import OrderedDict
def flatten(tensor):
    """Flattens a given tensor such that the channel axis is first.

    The shapes are transformed as follows:
        (N, C, D, H, W) -> (C, N * D * H * W)
    """
    C = tensor.size(1)
    # Move the channel axis to the front, then collapse everything else.
    axis_order = (1, 0) + tuple(range(2, tensor.dim()))
    transposed = tensor.permute(axis_order).contiguous()
    return transposed.view(C, -1)
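# A quick shape check for flatten (the tensor values here are arbitrary):
x = torch.randn(2, 4, 3, 8, 8)                 # (N, C, D, H, W)
assert flatten(x).shape == (4, 2 * 3 * 8 * 8)  # (C, N * D * H * W)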
def get_negative_expectation(q_samples, mode, average=True):
    """Computes the negative-sample expectation of a GAN-style divergence.

    Returns a dict mapping the mode name to the computed loss term.

    Args:
        q_samples: Negative samples generated by the critic.
        mode (str): One of ['GAN', 'JSD', 'X2', 'KL', 'RKL', 'DV', 'H2', 'W1'],
            selecting the loss function.
        average (bool): Returns the mean value if True, else the sum.

    References:
        Cantrelle et al., arXiv:2000.10596
        Goodfellow et al., NIPS 2014
        Nowozin et al., NIPS 2016
    """
    losses = dict()
    if mode == 'GAN':
        vals = -F.softplus(-q_samples)
        losses['GAN'] = vals.mean() if average else vals.sum()
    elif mode == 'JSD':
        log_q = F.logsigmoid(q_samples)
        losses['JSD'] = -(F.softplus(-q_samples).mean()
                          + F.softplus(q_samples).mean()
                          - log_q.mean()) / 2.0
    elif mode == 'X2':
        # Pearson chi-squared term; log(1 - sigmoid(q)) == logsigmoid(-q).
        log_ones_minus_q = F.logsigmoid(-q_samples)
        losses['X2'] = -torch.exp(log_ones_minus_q).mean()
    elif mode == 'KL':
        # KL of the critic's softmax distribution against a uniform 0.5 target.
        log_softmax_q = F.log_softmax(q_samples.detach(), dim=-1)
        softmax_q = torch.exp(log_softmax_q)
        log_target_probas = torch.full_like(log_softmax_q, np.log(0.5))
        losses['KL'] = torch.sum(
            softmax_q * (log_softmax_q - log_target_probas), dim=-1).mean()
    elif mode == 'RKL':
        # Reverse KL: exp(q) - q - 1 >= 0, zero only at q == 0.
        losses['RKL'] = torch.mean(torch.exp(q_samples) - q_samples - 1)
    elif mode == 'DV':
        # Donsker-Varadhan: subtract a detached log-mean-exp baseline from the
        # critic scores, then weight by sigmoid(q).
        baseline = (torch.logsumexp(q_samples.detach(), dim=-1, keepdim=True)
                    - np.log(q_samples.shape[-1]))
        vals = (q_samples - baseline + np.log(4.)) * torch.sigmoid(q_samples)
        losses['DV'] = vals.mean() if average else vals.sum()
    elif mode == 'H2':
        # Squared Hellinger: per-dimension penalty weighted by sigmoid(q)^2.
        log_ones_minus_q = F.logsigmoid(-q_samples)
        per_dim = ((-log_ones_minus_q - np.log(0.25))
                   * torch.sigmoid(q_samples).pow(2))
        losses['H2'] = torch.sum(per_dim, dim=-1).mean()
    elif mode == 'W1':
        # Wasserstein-1 critic: the negative term is the raw critic scores.
        losses['W1'] = q_samples.mean() if average else q_samples.sum()
    else:
        raise ValueError('Unknown mode: {}'.format(mode))
    return losses
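# Minimal usage sketch for get_negative_expectation (the scores are random
# placeholders, not real critic outputs):
q = torch.randn(16, 8)
neg = get_negative_expectation(q, mode='JSD')
print(neg['JSD'])  # scalar tensor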
def compareMarginalizedLatents(latentCodes, factors, marginalizationOrdering,
                               marginalizationGranularity, namesPerFactor,
                               sizePerFactor, k=5):
    # NOTE: the function name, signature, and the JSD/MMD estimators below are
    # hypothetical, inferred from the identifiers used in the body.
    granularities = [marginalizationGranularity[f] for f in marginalizationOrdering]
    if granularities and max(granularities) != min(granularities):
        raise Exception("Marginalizations must either be empty or all be equal")
    # Marginalize over the factor with the smallest granularity.
    factorToMarginalizeOver = min(marginalizationOrdering,
                                  key=lambda f: marginalizationGranularity[f])
    factorToKeepIndices = [f for f in factors if f != factorToMarginalizeOver]
    if len(factorToKeepIndices) != len(set(factorToKeepIndices)):
        raise Exception("Something went wrong")
    # Name and size every kept subset; deduplicate and sort by size.
    namesAndSizes = sorted(
        set((namesPerFactor[f], sizePerFactor[f]) for f in factorToKeepIndices),
        key=lambda pair: pair[-1])
    namesByModality = ["_".join(map(str, pair)) for pair in namesAndSizes]
    sizesByModality = [pair[-1] for pair in namesAndSizes]
    namesByDim = [name for name, size in namesAndSizes for _ in range(size)]
    # Slice the codes per modality along the last axis (the subset sizes are
    # assumed to partition that axis exactly).
    latentCodesSlicedByModality = list(
        torch.split(latentCodes, sizesByModality, dim=-1))
    n = len(latentCodesSlicedByModality)
    jsdMatrix = np.zeros((n, n))
    mmdMatrix = np.zeros((n, n))
    for i, j in itertools.product(range(n), repeat=2):
        a = latentCodesSlicedByModality[i].detach().cpu().numpy().ravel()
        b = latentCodesSlicedByModality[j].detach().cpu().numpy().ravel()
        # JSD between histogram estimates of the pooled code values.
        bins = np.histogram_bin_edges(np.concatenate([a, b]), bins=32)
        p = np.histogram(a, bins=bins)[0] + 1e-8
        q = np.histogram(b, bins=bins)[0] + 1e-8
        p, q = p / p.sum(), q / q.sum()
        m = 0.5 * (p + q)
        jsdMatrix[i, j] = 0.5 * entropy(p, m) + 0.5 * entropy(q, m)
        # Linear-kernel MMD: distance between the pooled mean embeddings.
        mmdMatrix[i, j] = abs(a.mean() - b.mean())
    # k smallest entries of the upper triangle (diagonal included).
    kSmallestValuesMatrix = np.sort(jsdMatrix[np.triu_indices(n)])[:k]
    return (jsdMatrix, mmdMatrix, kSmallestValuesMatrix,
            latentCodesSlicedByModality, sizesByModality, namesByModality,
            namesByDim)
def kl_divergence(mu, stddev, target_mu, target_stddev, reduction_mode,
                  summode=False):
    # Closed-form KL( N(mu, stddev^2) || N(target_mu, target_stddev^2) ),
    # evaluated element-wise:
    #   log(s_t / s) + (s^2 + (mu - mu_t)^2) / (2 s_t^2) - 1/2
    kldivergence_kl = (torch.log(target_stddev / stddev)
                       + (stddev.pow(2) + (mu - target_mu).pow(2))
                       / (2.0 * target_stddev.pow(2))
                       - 0.5)
    reduction_modes = {
        'mean': lambda x: x.mean(),
        'none': lambda x: x,
        'sum': lambda x: x.sum(),
    }
    reduce = reduction_modes.get(reduction_mode, reduction_modes['mean'])
    if not summode:
        return reduce(kldivergence_kl)
    # summode: sum over the event (last) axis first, then apply the requested
    # reduction over what remains -- assumed semantics.
    return reduce(kldivergence_kl.sum(dim=-1))
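# Sanity check for kl_divergence: the KL of a Gaussian against itself is zero
# (the parameter values are otherwise arbitrary):
mu0, std0 = torch.zeros(4), torch.ones(4)
assert torch.allclose(kl_divergence(mu0, std0, mu0, std0, 'mean'),
                      torch.tensor(0.))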
def kl_divergence_multi_modal(mu, stdDev, targetMu, targetStdDev,
                              reductionMode, summode=False):
    # Evaluates the closed-form Gaussian KL of one posterior against K target
    # components. targetMu / targetStdDev are assumed to be laid out as
    # (..., K, D); mu / stdDev are unsqueezed to (..., 1, D) so the formula
    # broadcasts over the component axis.
    mu = mu.unsqueeze(-2)
    stdDev = stdDev.unsqueeze(-2)
    kldivergenceMultiModalKl = (torch.log(targetStdDev / stdDev)
                                + (stdDev.pow(2) + (mu - targetMu).pow(2))
                                / (2.0 * targetStdDev.pow(2))
                                - 0.5)
    reductionModes = {
        'mean': lambda x: x.mean(),
        'none': lambda x: x,
        'sum': lambda x: x.sum(),
    }
    reduce = reductionModes.get(reductionMode, reductionModes['mean'])
    if not summode:
        return reduce(kldivergenceMultiModalKl)
    # summode: reduce each of the K components separately.
    return [reduce(chunk.squeeze(-2))
            for chunk in kldivergenceMultiModalKl.split(1, dim=-2)]
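# Shape sketch for the multi-modal variant (values arbitrary): one posterior
# scored against K = 3 hypothetical target components.
mu_q, std_q = torch.zeros(8, 4), torch.ones(8, 4)
t_mu, t_std = torch.randn(8, 3, 4), torch.ones(8, 3, 4)
print(kl_divergence_multi_modal(mu_q, std_q, t_mu, t_std, 'none').shape)
# torch.Size([8, 3, 4]) -- per-component, per-dimension KL terms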
def kl_divergence_multi_modal_split_apply_combine(mu, std_dev, target_mus,
                                                  target_stds, reduction_mode,
                                                  summode=False):
    # Same broadcast KL as kl_divergence_multi_modal, but the reduction runs
    # over the component axis (dim=-2) instead of collapsing everything.
    mu = mu.unsqueeze(-2)            # (..., 1, D)
    std_dev = std_dev.unsqueeze(-2)
    kl = (torch.log(target_stds / std_dev)  # target_* assumed (..., K, D)
          + (std_dev.pow(2) + (mu - target_mus).pow(2))
          / (2.0 * target_stds.pow(2))
          - 0.5)
    reductions = {
        'mean': lambda t: t.mean(dim=-2),
        'none': lambda t: t,
        'sum': lambda t: t.sum(dim=-2),
    }
    reduce = reductions.get(reduction_mode, reductions['mean'])
    if not summode:
        return reduce(kl)
    # summode: split along the component axis, reduce each chunk, and return
    # the per-component results (assumed split-apply-combine semantics).
    return [reduce(chunk) for chunk in kl.split(1, dim=-2)]
def kl_divergence_standard_normal_multi_modal(mu, std_dev, target_mu=None,
                                              target_sigma=None,
                                              reduction_mode=None,
                                              summode=False):
    # KL of N(mu, std_dev^2) against a target that defaults to the standard
    # normal N(0, I); computed exactly via the closed-form kl_divergence above
    # rather than by Monte-Carlo sampling.
    if target_mu is None:
        target_mu = torch.zeros_like(mu)
    if target_sigma is None:
        target_sigma = torch.ones_like(std_dev)
    return kl_divergence(mu, std_dev, target_mu, target_sigma,
                         reduction_mode or 'mean', summode=summode)
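# Usage sketch: with the default standard-normal target, a unit-Gaussian
# posterior has exactly zero KL (the shapes are arbitrary):
z_mu, z_std = torch.zeros(2, 5), torch.ones(2, 5)
print(kl_divergence_standard_normal_multi_modal(z_mu, z_std))  # tensor(0.)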