Python numpy.nansum() Usage Examples

The following code examples show how to use numpy.nansum(). They are extracted from open-source Python projects.
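As a quick baseline before the project examples, here is a minimal illustration (with made-up values) of what numpy.nansum() does compared to numpy.sum():

import numpy as np

a = np.array([1.0, np.nan, 3.0])
print(np.sum(a))      # nan -- a single NaN poisons the ordinary sum
print(np.nansum(a))   # 4.0 -- NaN entries are treated as zero
print(np.nansum(np.array([np.nan, np.nan])))  # 0.0 -- an all-NaN input sums to zero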

Example 1

def plot_power_rose(wind_directions, power, num_wd_bins):
    """Plot a power rose. Kind of a hacked wind rose.

    Arguments:
    wind_directions -- a np array of wind directions filtered for icing
    power -- a np array of percent power production corresponding to wind_directions
    num_wd_bins -- the number of wind direction bins to include on the rose.
    """
    dir_bins = np.array(np.linspace(0.0, 360.0 - 360.0 / num_wd_bins, num_wd_bins))

    # Find the total amount of power produced in each sector.
    dir_power = np.array([np.nansum(filter_obstacles(power, wind_directions,
                                                     (wd + 180.0) % 360.0,
                                                     360 - 360 / float(num_wd_bins)))
                          for wd in dir_bins])
    # Normalize it and round to the nearest int.
    dir_power = np.round(dir_power * 100.0 / np.nansum(dir_power), decimals=0)

    proportional_wd = np.array([])
    for i in range(len(dir_power)):
        # Loop as many times as the percent of power produced in this sector,
        # i.e. if 50% of power comes from the south, append 50 instances of 180.0 degrees.
        for n in range(int(dir_power[i])):
            proportional_wd = np.append(proportional_wd, dir_bins[i])

    ones = np.ones(len(proportional_wd))
    ax = new_axes()
    ax.bar(proportional_wd, ones, normed=False, opening=0.8, edgecolor='white',
           bins=[0.0, 100.], cmap=cm.RdGy)
    set_legend(ax)
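The helpers new_axes(), set_legend(), filter_obstacles() and the windrose-style ax.bar() call come from the surrounding project and are not shown here. The nansum-specific idea is simply "total the possibly NaN-laden power readings that fall in each direction sector"; a stand-alone sketch of that step with invented sample arrays:

import numpy as np

# Hypothetical sample data: directions in degrees, power readings with gaps marked as NaN.
wind_directions = np.array([10.0, 95.0, 170.0, 185.0, 350.0])
power = np.array([5.0, np.nan, 7.0, 3.0, 2.0])

num_wd_bins = 4
dir_bins = np.linspace(0.0, 360.0 - 360.0 / num_wd_bins, num_wd_bins)  # sector start angles
sector = np.digitize(wind_directions, dir_bins) - 1                    # sector index for each sample

# Total power per sector; NaN readings are simply ignored by nansum.
dir_power = np.array([np.nansum(power[sector == i]) for i in range(num_wd_bins)])
print(dir_power)  # [5. 7. 3. 2.]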

Example 2

def ests_ll_quad(self, params):
    """
    Calculate the loglikelihood given model parameters `params`.

    This method uses Gaussian quadrature, and thus returns an *approximate*
    integral.
    """
    mu0, gamma0, err0 = np.split(params, 3)
    x = np.tile(self.z, (self.cfg.QCOUNT, 1, 1))  # (QCOUNT x nhosp x nmeas)
    loc = mu0 + np.outer(QC1, gamma0)
    loc = np.tile(loc, (self.n, 1, 1))
    loc = np.transpose(loc, (1, 0, 2))
    scale = np.tile(err0, (self.cfg.QCOUNT, self.n, 1))
    zs = lpdf_3d(x=x, loc=loc, scale=scale)

    w2 = np.tile(self.w, (self.cfg.QCOUNT, 1, 1))
    wted = np.nansum(w2 * zs, axis=2).T  # (nhosp x QCOUNT)
    qh = np.tile(QC1, (self.n, 1))  # (nhosp x QCOUNT)
    combined = wted + norm.logpdf(qh)  # (nhosp x QCOUNT)

    return logsumexp(np.nan_to_num(combined), b=QC2, axis=1)  # (nhosp)
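QC1, QC2, lpdf_3d and the rest of the quadrature machinery belong to the surrounding model class. The nansum call itself just collapses the last axis of a 3-D array while skipping NaN entries, which can be checked in isolation (shapes and data below are made up):

import numpy as np

rng = np.random.default_rng(0)
zs = rng.normal(size=(3, 4, 5))      # shape (QCOUNT, nhosp, nmeas), invented sizes
zs[0, 1, 2] = np.nan                 # pretend one measurement is missing
w2 = np.ones_like(zs)

wted = np.nansum(w2 * zs, axis=2).T  # sum over the measurement axis -> (nhosp, QCOUNT)
print(wted.shape)                    # (4, 3); the NaN entry contributes nothing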

Example 3

def chi2(b, dataset, model1='phoebe1model', model2='phoebe2model'):
    ds = b.get_dataset(dataset) - b.get_dataset(dataset, method='*dep')

    if ds.method == 'lc':
        depvar = 'fluxes'
    elif ds.method == 'rv':
        depvar = 'rvs'
    else:
        raise NotImplementedError("chi2 doesn't support dataset method: '{}'".format(ds.method))

    chi2 = 0.0
    for comp in ds.components if len(ds.components) else [None]:
        if comp == '_default':
            continue

        # phoebe gives nans for RVs when a star is completely eclipsed, whereas
        # phoebe1 will give a value. So let's use nansum to just ignore those
        # regions of the RV curve
        print("***", depvar, dataset, model1, model2, comp)
        chi2 += np.nansum((b.get_value(qualifier=depvar, dataset=dataset, model=model1, component=comp, context='model')
                           - b.get_value(qualifier=depvar, dataset=dataset, model=model2, component=comp, context='model'))**2)

    return chi2
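b.get_value() is part of the PHOEBE bundle API; the point of using nansum here is that residuals involving a NaN (such as an RV point during total eclipse) simply drop out of the chi-square. A toy version with plain, invented arrays:

import numpy as np

rv_model1 = np.array([10.0, 12.0, np.nan, 9.0])  # NaN where the star is fully eclipsed
rv_model2 = np.array([10.5, 11.0, 13.0, 9.2])

chi2 = np.nansum((rv_model1 - rv_model2) ** 2)   # the NaN residual is ignored
print(chi2)  # approximately 1.29 = 0.25 + 1.0 + 0.04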

Example 4

def weighted_average(weights, pep_abd, group_ix):
    '''
    Calculate weighted geometric means for sample groups
    Inputs:
        weights: weights of peptides after filtering by loading threshold
        pep_abd: peptide abundances after filtering by loading threshold
        group_ix: array indexes of sample groups
    '''
    global nGroups
    abd_w = pep_abd * weights[..., None]
    one_w = abd_w / abd_w * weights[..., None]
    a_sums = np.nansum(abd_w, axis=0)
    w_sums = np.nansum(one_w, axis=0)
    expr = np.empty(nGroups)
    for i in range(expr.shape[0]):
        expr[i] = a_sums[group_ix[i]].sum() / w_sums[group_ix[i]].sum()
    return expr
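The abd_w / abd_w trick yields 1.0 where an abundance exists and NaN where it is missing, so after multiplying by the weights the two nansum calls return, per sample, the weighted sums and the totals of the weights that were actually used. A toy run of that bookkeeping with invented numbers:

import numpy as np

# 3 peptides x 2 samples, one missing abundance.
pep_abd = np.array([[2.0, 4.0],
                    [np.nan, 6.0],
                    [3.0, 5.0]])
weights = np.array([1.0, 0.5, 2.0])

abd_w = pep_abd * weights[..., None]        # weighted abundances (NaN stays NaN)
one_w = abd_w / abd_w * weights[..., None]  # weight where a value exists, NaN where missing

a_sums = np.nansum(abd_w, axis=0)           # per-sample weighted sums, missing values skipped
w_sums = np.nansum(one_w, axis=0)           # per-sample totals of the weights actually used
print(a_sums / w_sums)                      # weighted means that ignore the missing entry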

Example 5

def pwdist_canberra(self, seq1idx, seq2idx):
    """Compute the Canberra distance between two vectors.

    References:
        1. http://scipy.org/

    Notes:
        When `u[i]` and `v[i]` are 0 for given i, then
        the fraction 0/0 = 0 is used in the calculation.
    """
    u = self[seq1idx]
    v = self[seq2idx]
    olderr = np.seterr(invalid='ignore')
    try:
        d = np.nansum(abs(u - v) / (abs(u) + abs(v)))
    finally:
        np.seterr(**olderr)
    return d
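Where both u[i] and v[i] are zero, the term is 0/0 = NaN, and np.nansum() effectively counts it as 0, which is exactly the convention described in the docstring. A quick stand-alone check with invented vectors:

import numpy as np

u = np.array([0.0, 1.0, 2.0])
v = np.array([0.0, 3.0, 2.0])

olderr = np.seterr(invalid='ignore')   # silence the 0/0 warning, as in the example above
try:
    d = np.nansum(abs(u - v) / (abs(u) + abs(v)))
finally:
    np.seterr(**olderr)

print(d)  # 0.5: the 0/0 term counts as 0, |1-3|/(1+3) = 0.5, |2-2|/(2+2) = 0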

Example 6

def normalize(self, to=1.0):
    """
    This function ...
    :param to:
    :return:
    """
    # Calculate the sum of all the pixels
    sum = np.nansum(self)

    # Calculate the conversion factor
    factor = to / sum

    # Multiply the frame with the conversion factor
    self.__imul__(factor)

# -----------------------------------------------------------------
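Here self is an image-frame class (an ndarray subclass) from the surrounding project; with a plain array the same normalization reads as follows (pixel values are invented):

import numpy as np

frame = np.array([[1.0, 2.0],
                  [np.nan, 3.0]])

total = np.nansum(frame)   # 6.0; NaN pixels do not contribute
frame *= 1.0 / total       # in-place scaling, like self.__imul__(factor)
print(np.nansum(frame))    # 1.0: the frame now sums to the requested total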

Example 7

def calculate_optimizer_time(trials):
    optimizer_time = []
    time_idx = 0
    optimizer_time.append(trials.cv_starttime[0] - trials.starttime[time_idx])
    for i in range(len(trials.cv_starttime[1:])):
        if trials.cv_starttime[i + 1] > trials.endtime[time_idx]:
            optimizer_time.append(trials.endtime[time_idx] -
                                  trials.cv_endtime[i])
            time_idx += 1
            optimizer_time.append(trials.cv_starttime[i + 1] -
                                  trials.starttime[time_idx])
        else:
            optimizer_time.append(trials.cv_starttime[i + 1] -
                                  trials.cv_endtime[i])
    optimizer_time.append(trials.endtime[time_idx] - trials.cv_endtime[-1])
    trials.optimizer_time = optimizer_time

    # We need to import numpy again
    import numpy as np
    return np.nansum(optimizer_time)
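Here optimizer_time is an ordinary Python list; np.nansum() accepts it directly and ignores any NaN entries, as in this invented example:

import numpy as np

optimizer_time = [0.8, 1.2, float('nan'), 0.5]  # e.g. one interval could not be measured
print(np.nansum(optimizer_time))                # 2.5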

Example 8

def lnlike(self, pars):
    # Pull theta out of pars
    theta = pars[:self.Nbins]

    # Generate the inner summation
    gamma = np.ones_like(self.bin_idx) * np.nan
    good = (self.bin_idx < self.Nbins) & (self.bin_idx >= 0)  # nans in q get put in nonexistent bins
    gamma[good] = self.Nobs * self.censoring_fcn(self.mcmc_samples[good]) * theta[self.bin_idx[good]]
    summation = np.nanmean(gamma, axis=1)

    # Calculate the integral
    I = self._integral_fcn(theta)

    # Generate the log-likelihood
    ll = -I + np.nansum(np.log(summation))
    return ll
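np.log() returns NaN for negative inputs and -inf for zero; np.nansum() drops only the NaN terms, so a log(0) still drives the sum to -inf. A quick check with invented values:

import numpy as np

summation = np.array([0.2, -1.0, 0.5])          # the negative entry yields NaN under log

with np.errstate(invalid='ignore', divide='ignore'):
    print(np.nansum(np.log(summation)))          # log(-1) -> NaN is dropped; result = log(0.2) + log(0.5)
    print(np.nansum(np.log([0.2, 0.0, 0.5])))    # -inf: nansum skips NaN, but not the -inf from log(0)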
