query stringlengths 1 46.9k | pos stringlengths 75 104k | neg listlengths 12 12 | scores listlengths 12 12 |
|---|---|---|---|
Estimate discontinuity in basis of low resolution image segmentation.
:return: discontinuity in low resolution | def __msgc_step3_discontinuity_localization(self):
"""
Estimate discontinuity in basis of low resolution image segmentation.
:return: discontinuity in low resolution
"""
import scipy
start = self._start_time
seg = 1 - self.segmentation.astype(np.int8)
self.stats["low level object voxels"] = np.sum(seg)
self.stats["low level image voxels"] = np.prod(seg.shape)
# in seg is now stored low resolution segmentation
# back to normal parameters
# step 2: discontinuity localization
# self.segparams = sparams_hi
seg_border = scipy.ndimage.filters.laplace(seg, mode="constant")
logger.debug("seg_border: %s", scipy.stats.describe(seg_border, axis=None))
# logger.debug(str(np.max(seg_border)))
# logger.debug(str(np.min(seg_border)))
seg_border[seg_border != 0] = 1
logger.debug("seg_border: %s", scipy.stats.describe(seg_border, axis=None))
# scipy.ndimage.morphology.distance_transform_edt
boundary_dilatation_distance = self.segparams["boundary_dilatation_distance"]
seg = scipy.ndimage.morphology.binary_dilation(
seg_border,
# seg,
np.ones(
[
(boundary_dilatation_distance * 2) + 1,
(boundary_dilatation_distance * 2) + 1,
(boundary_dilatation_distance * 2) + 1,
]
),
)
if self.keep_temp_properties:
self.temp_msgc_lowres_discontinuity = seg
else:
self.temp_msgc_lowres_discontinuity = None
if self.debug_images:
import sed3
pd = sed3.sed3(seg_border) # ), contour=seg)
pd.show()
pd = sed3.sed3(seg) # ), contour=seg)
pd.show()
# segzoom = scipy.ndimage.interpolation.zoom(seg.astype('float'), zoom,
# order=0).astype('int8')
self.stats["t3"] = time.time() - start
return seg | [
"def branchScale(self):\n \"\"\"See docs for `Model` abstract base class.\"\"\"\n bs = -(self.prx * scipy.diagonal(self.Prxy, axis1=1, axis2=2)\n ).sum() * self.mu / float(self.nsites)\n assert bs > 0\n return bs",
"def branchScale(self):\n \"\"\"See docs for `Mod... | [
0.6861903071403503,
0.6760287284851074,
0.6727304458618164,
0.6622427105903625,
0.6478081941604614,
0.6416714191436768,
0.6381795406341553,
0.6336018443107605,
0.633543848991394,
0.6316496729850769,
0.6313363313674927,
0.6291797757148743
] |
Run Graph-Cut segmentation with refinement of low resolution multiscale graph.
In first step is performed normal GC on low resolution data
Second step construct finer grid on edges of segmentation from first
step.
There is no option for use without `use_boundary_penalties` | def __multiscale_gc_lo2hi_run(self): # , pyed):
"""
Run Graph-Cut segmentation with refinement of low resolution multiscale graph.
In first step is performed normal GC on low resolution data
Second step construct finer grid on edges of segmentation from first
step.
There is no option for use without `use_boundary_penalties`
"""
# from PyQt4.QtCore import pyqtRemoveInputHook
# pyqtRemoveInputHook()
self._msgc_lo2hi_resize_init()
self.__msgc_step0_init()
hard_constraints = self.__msgc_step12_low_resolution_segmentation()
# ===== high resolution data processing
seg = self.__msgc_step3_discontinuity_localization()
self.stats["t3.1"] = (time.time() - self._start_time)
graph = Graph(
seg,
voxelsize=self.voxelsize,
nsplit=self.segparams["block_size"],
edge_weight_table=self._msgc_npenalty_table,
compute_low_nodes_index=True,
)
# graph.run() = graph.generate_base_grid() + graph.split_voxels()
# graph.run()
graph.generate_base_grid()
self.stats["t3.2"] = (time.time() - self._start_time)
graph.split_voxels()
self.stats["t3.3"] = (time.time() - self._start_time)
self.stats.update(graph.stats)
self.stats["t4"] = (time.time() - self._start_time)
mul_mask, mul_val = self.__msgc_tlinks_area_weight_from_low_segmentation(seg)
area_weight = 1
unariesalt = self.__create_tlinks(
self.img,
self.voxelsize,
self.seeds,
area_weight=area_weight,
hard_constraints=hard_constraints,
mul_mask=None,
mul_val=None,
)
# N-links prepared
self.stats["t5"] = (time.time() - self._start_time)
un, ind = np.unique(graph.msinds, return_index=True)
self.stats["t6"] = (time.time() - self._start_time)
self.stats["t7"] = (time.time() - self._start_time)
unariesalt2_lo2hi = np.hstack(
[unariesalt[ind, 0, 0].reshape(-1, 1), unariesalt[ind, 0, 1].reshape(-1, 1)]
)
nlinks_lo2hi = np.hstack([graph.edges, graph.edges_weights.reshape(-1, 1)])
if self.debug_images:
import sed3
ed = sed3.sed3(unariesalt[:, :, 0].reshape(self.img.shape))
ed.show()
import sed3
ed = sed3.sed3(unariesalt[:, :, 1].reshape(self.img.shape))
ed.show()
# ed = sed3.sed3(seg)
# ed.show()
# import sed3
# ed = sed3.sed3(graph.data)
# ed.show()
# import sed3
# ed = sed3.sed3(graph.msinds)
# ed.show()
# nlinks, unariesalt2, msinds = self.__msgc_step45678_construct_graph(area_weight, hard_constraints, seg)
# self.__msgc_step9_finish_perform_gc_and_reshape(nlinks, unariesalt2, msinds)
self.__msgc_step9_finish_perform_gc_and_reshape(
nlinks_lo2hi, unariesalt2_lo2hi, graph.msinds
)
self._msgc_lo2hi_resize_clean_finish() | [
"def __multiscale_gc_hi2lo_run(self): # , pyed):\n \"\"\"\n Run Graph-Cut segmentation with simplifiyng of high resolution multiscale graph.\n In first step is performed normal GC on low resolution data\n Second step construct finer grid on edges of segmentation from first\n step... | [
0.8354851007461548,
0.717715322971344,
0.7040823698043823,
0.6870102882385254,
0.6714046597480774,
0.6607586145401001,
0.6603902578353882,
0.6547603011131287,
0.6508672833442688,
0.6498038172721863,
0.6489167213439941,
0.641808032989502
] |
Run Graph-Cut segmentation with simplifiyng of high resolution multiscale graph.
In first step is performed normal GC on low resolution data
Second step construct finer grid on edges of segmentation from first
step.
There is no option for use without `use_boundary_penalties` | def __multiscale_gc_hi2lo_run(self): # , pyed):
"""
Run Graph-Cut segmentation with simplifiyng of high resolution multiscale graph.
In first step is performed normal GC on low resolution data
Second step construct finer grid on edges of segmentation from first
step.
There is no option for use without `use_boundary_penalties`
"""
# from PyQt4.QtCore import pyqtRemoveInputHook
# pyqtRemoveInputHook()
self.__msgc_step0_init()
hard_constraints = self.__msgc_step12_low_resolution_segmentation()
# ===== high resolution data processing
seg = self.__msgc_step3_discontinuity_localization()
nlinks, unariesalt2, msinds = self.__msgc_step45678_hi2lo_construct_graph(
hard_constraints, seg
)
self.__msgc_step9_finish_perform_gc_and_reshape(nlinks, unariesalt2, msinds) | [
"def __multiscale_gc_lo2hi_run(self): # , pyed):\n \"\"\"\n Run Graph-Cut segmentation with refinement of low resolution multiscale graph.\n In first step is performed normal GC on low resolution data\n Second step construct finer grid on edges of segmentation from first\n step.\... | [
0.846430778503418,
0.7215781211853027,
0.7195752263069153,
0.685760498046875,
0.6824662089347839,
0.6763722896575928,
0.6726657152175903,
0.6619572043418884,
0.6599618792533875,
0.6594784259796143,
0.6542990803718567,
0.6521185040473938
] |
Return values (intensities) by indexes.
Used for multiscale graph cut.
data = [[0 1 1],
[0 2 2],
[0 2 2]]
inds = [[0 1 2],
[3 4 4],
[5 4 4]]
return: [0, 1, 1, 0, 2, 0]
If the data are not consistent, it will take the maximal value | def __ordered_values_by_indexes(self, data, inds):
"""
Return values (intensities) by indexes.
Used for multiscale graph cut.
data = [[0 1 1],
[0 2 2],
[0 2 2]]
inds = [[0 1 2],
[3 4 4],
[5 4 4]]
return: [0, 1, 1, 0, 2, 0]
If the data are not consistent, it will take the maximal value
"""
# get unique labels and their first indexes
# lab, linds = np.unique(inds, return_index=True)
# compute values by indexes
# values = data.reshape(-1)[linds]
# alternative slow implementation
# if there are different data on same index, it will take
# maximal value
# lab = np.unique(inds)
# values = [0]*len(lab)
# for label in lab:
# values[label] = np.max(data[inds == label])
#
# values = np.asarray(values)
# yet another implementation
values = [None] * (np.max(inds) + 1)
linear_inds = inds.ravel()
linear_data = data.ravel()
for i in range(0, len(linear_inds)):
# going over all data pixels
if values[linear_inds[i]] is None:
# this index is found for first
values[linear_inds[i]] = linear_data[i]
elif values[linear_inds[i]] < linear_data[i]:
# here can be changed maximal or minimal value
values[linear_inds[i]] = linear_data[i]
values = np.asarray(values)
return values | [
"def get_maximum_index(indices):\n \"\"\"Internally used.\"\"\"\n def _maximum_idx_single(idx):\n if isinstance(idx, slice):\n start = -1\n stop = 0\n if idx.start is not None:\n start = idx.start.__index__()\n if idx.stop is not None:\n ... | [
0.7258252501487732,
0.7097887396812439,
0.7016910314559937,
0.7010431885719299,
0.6889966130256653,
0.6820541024208069,
0.678980827331543,
0.6788231134414673,
0.676472544670105,
0.6743552684783936,
0.6736085414886475,
0.672756016254425
] |
Function computes multiscale indexes of ndarray.
mask: Says where is original resolution (0) and where is small
resolution (1). Mask is in small resolution.
orig_shape: Original shape of input data.
zoom: Usually number greater then 1
result = [[0 1 2],
[3 4 4],
[5 4 4]] | def __hi2lo_multiscale_indexes(self, mask, orig_shape): # , zoom):
"""
Function computes multiscale indexes of ndarray.
mask: Says where is original resolution (0) and where is small
resolution (1). Mask is in small resolution.
orig_shape: Original shape of input data.
zoom: Usually number greater then 1
result = [[0 1 2],
[3 4 4],
[5 4 4]]
"""
mask_orig = zoom_to_shape(mask, orig_shape, dtype=np.int8)
inds_small = np.arange(mask.size).reshape(mask.shape)
inds_small_in_orig = zoom_to_shape(inds_small, orig_shape, dtype=np.int8)
inds_orig = np.arange(np.prod(orig_shape)).reshape(orig_shape)
# inds_orig = inds_orig * mask_orig
inds_orig += np.max(inds_small_in_orig) + 1
# print 'indexes'
# import py3DSeedEditor as ped
# import pdb; pdb.set_trace() # BREAKPOINT
# '==' is not the same as 'is' for numpy.array
inds_small_in_orig[mask_orig == True] = inds_orig[mask_orig == True] # noqa
inds = inds_small_in_orig
# print np.max(inds)
# print np.min(inds)
inds = relabel_squeeze(inds)
logger.debug(
"Index after relabeling: %s", scipy.stats.describe(inds, axis=None)
)
# logger.debug("Minimal index after relabeling: " + str(np.min(inds)))
# inds_orig[mask_orig==True] = 0
# inds_small_in_orig[mask_orig==False] = 0
# inds = (inds_orig + np.max(inds_small_in_orig) + 1) + inds_small_in_orig
return inds, mask_orig | [
"def construct_zernike_polynomials(x, y, zernike_indexes, mask=None, weight=None):\n \"\"\"Return the zerike polynomials for all objects in an image\n \n x - the X distance of a point from the center of its object\n y - the Y distance of a point from the center of its object\n zernike_indexes - an Nx... | [
0.6771979928016663,
0.6765623092651367,
0.6748027205467224,
0.6736266016960144,
0.6680997014045715,
0.6649146676063538,
0.6602632999420166,
0.6561344265937805,
0.65461266040802,
0.6545136570930481,
0.6541271209716797,
0.6522963047027588
] |
Interactive seed setting with 3d seed editor | def interactivity(self, min_val=None, max_val=None, qt_app=None):
"""
Interactive seed setting with 3d seed editor
"""
from .seed_editor_qt import QTSeedEditor
from PyQt4.QtGui import QApplication
if min_val is None:
min_val = np.min(self.img)
if max_val is None:
max_val = np.max(self.img)
window_c = (max_val + min_val) / 2 # .astype(np.int16)
window_w = max_val - min_val # .astype(np.int16)
if qt_app is None:
qt_app = QApplication(sys.argv)
pyed = QTSeedEditor(
self.img,
modeFun=self.interactivity_loop,
voxelSize=self.voxelsize,
seeds=self.seeds,
volume_unit=self.volume_unit,
)
pyed.changeC(window_c)
pyed.changeW(window_w)
qt_app.exec_() | [
"def set_seed(seed: int):\n \"\"\" Set random seed for python, numpy and pytorch RNGs \"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.random.manual_seed(seed)",
"def set_seeds(self, seeds):\n \"\"\"\n Function for manual seed setting. Sets variable seeds and prepares\n v... | [
0.7103196382522583,
0.7084529399871826,
0.7084075212478638,
0.7056494355201721,
0.6945380568504333,
0.6907649636268616,
0.6894176006317139,
0.6885712146759033,
0.6882632374763489,
0.6879781484603882,
0.6860505938529968,
0.6801682710647583
] |
Function for manual seed setting. Sets variable seeds and prepares
voxels for density model.
:param seeds: ndarray (0 - nothing, 1 - object, 2 - background,
3 - object just hard constraints, no model training, 4 - background
just hard constraints, no model training) | def set_seeds(self, seeds):
"""
Function for manual seed setting. Sets variable seeds and prepares
voxels for density model.
:param seeds: ndarray (0 - nothing, 1 - object, 2 - background,
3 - object just hard constraints, no model training, 4 - background
just hard constraints, no model training)
"""
if self.img.shape != seeds.shape:
raise Exception("Seeds must be same size as input image")
self.seeds = seeds.astype("int8")
self.voxels1 = self.img[self.seeds == 1]
self.voxels2 = self.img[self.seeds == 2] | [
"def __set_hard_hard_constraints(self, tdata1, tdata2, seeds):\n \"\"\"\n it works with seed labels:\n 0: nothing\n 1: object 1 - full seeds\n 2: object 2 - full seeds\n 3: object 1 - not a training seeds\n 4: object 2 - not a training seeds\n \"\"\"\n ... | [
0.724362850189209,
0.7048511505126953,
0.6971395015716553,
0.6954063177108765,
0.6898720860481262,
0.6891217231750488,
0.6812682151794434,
0.6777561902999878,
0.6749814748764038,
0.6689315438270569,
0.6673877835273743,
0.6638182401657104
] |
Run the Graph Cut segmentation according to preset parameters.
:param run_fit_model: Allow to skip model fit when the model is prepared before
:return: | def run(self, run_fit_model=True):
"""
Run the Graph Cut segmentation according to preset parameters.
:param run_fit_model: Allow to skip model fit when the model is prepared before
:return:
"""
if run_fit_model:
self.fit_model(self.img, self.voxelsize, self.seeds)
self._start_time = time.time()
if self.segparams["method"].lower() in ("graphcut", "gc"):
self.__single_scale_gc_run()
elif self.segparams["method"].lower() in (
"multiscale_graphcut",
"multiscale_gc",
"msgc",
"msgc_lo2hi",
"lo2hi",
"multiscale_graphcut_lo2hi",
):
logger.debug("performing multiscale Graph-Cut lo2hi")
self.__multiscale_gc_lo2hi_run()
elif self.segparams["method"].lower() in (
"msgc_hi2lo",
"hi2lo",
"multiscale_graphcut_hi2lo",
):
logger.debug("performing multiscale Graph-Cut hi2lo")
self.__multiscale_gc_hi2lo_run()
else:
logger.error("Unknown segmentation method: " + self.segparams["method"]) | [
"def model_segments(copy_file, work_dir, paired):\n \"\"\"Perform segmentation on input copy number log2 ratio file.\n \"\"\"\n out_file = os.path.join(work_dir, \"%s.cr.seg\" % dd.get_sample_name(paired.tumor_data))\n tumor_counts, normal_counts = heterogzygote_counts(paired)\n if not utils.file_exi... | [
0.6835083961486816,
0.6589675545692444,
0.6549115777015686,
0.6541705131530762,
0.6504920125007629,
0.6443780660629272,
0.6398264765739441,
0.6389582753181458,
0.6380501985549927,
0.6336901187896729,
0.6321241855621338,
0.6307634711265564
] |
it works with seed labels:
0: nothing
1: object 1 - full seeds
2: object 2 - full seeds
3: object 1 - not a training seeds
4: object 2 - not a training seeds | def __set_hard_hard_constraints(self, tdata1, tdata2, seeds):
"""
it works with seed labels:
0: nothing
1: object 1 - full seeds
2: object 2 - full seeds
3: object 1 - not a training seeds
4: object 2 - not a training seeds
"""
seeds_mask = (seeds == 1) | (seeds == 3)
tdata2[seeds_mask] = np.max(tdata2) + 1
tdata1[seeds_mask] = 0
seeds_mask = (seeds == 2) | (seeds == 4)
tdata1[seeds_mask] = np.max(tdata1) + 1
tdata2[seeds_mask] = 0
return tdata1, tdata2 | [
"def get_seed_sub(self, label):\r\n \"\"\" Return list of all seeds with specific label\r\n \"\"\"\r\n sx, sy, sz = np.nonzero(self.seeds == label)\r\n\r\n return sx, sy, sz",
"def identify(label, column_type = :integer)\n if column_type == :uuid\n NamedSeeds.uuid_v5(label)... | [
0.7497216463088989,
0.70253586769104,
0.6890901327133179,
0.6859575510025024,
0.6842992901802063,
0.6806407570838928,
0.6781275868415833,
0.6768274307250977,
0.674925684928894,
0.6744183897972107,
0.6728851795196533,
0.6726276278495789
] |
Compute edge values for graph cut tlinks based on image intensity
and texture. | def __similarity_for_tlinks_obj_bgr(
self,
data,
voxelsize,
# voxels1, voxels2,
# seeds, otherfeatures=None
):
"""
Compute edge values for graph cut tlinks based on image intensity
and texture.
"""
# self.fit_model(data, voxelsize, seeds)
# There is a need to have small vaues for good fit
# R(obj) = -ln( Pr (Ip | O) )
# R(bck) = -ln( Pr (Ip | B) )
# Boykov2001b
# ln is computed in likelihood
tdata1 = (-(self.mdl.likelihood_from_image(data, voxelsize, 1))) * 10
tdata2 = (-(self.mdl.likelihood_from_image(data, voxelsize, 2))) * 10
# to spare some memory
dtype = np.int16
if np.any(tdata1 > 32760):
dtype = np.float32
if np.any(tdata2 > 32760):
dtype = np.float32
if self.segparams["use_apriori_if_available"] and self.apriori is not None:
logger.debug("using apriori information")
gamma = self.segparams["apriori_gamma"]
a1 = (-np.log(self.apriori * 0.998 + 0.001)) * 10
a2 = (-np.log(0.999 - (self.apriori * 0.998))) * 10
# logger.debug('max ' + str(np.max(tdata1)) + ' min ' + str(np.min(tdata1)))
# logger.debug('max ' + str(np.max(tdata2)) + ' min ' + str(np.min(tdata2)))
# logger.debug('max ' + str(np.max(a1)) + ' min ' + str(np.min(a1)))
# logger.debug('max ' + str(np.max(a2)) + ' min ' + str(np.min(a2)))
tdata1u = (((1 - gamma) * tdata1) + (gamma * a1)).astype(dtype)
tdata2u = (((1 - gamma) * tdata2) + (gamma * a2)).astype(dtype)
tdata1 = tdata1u
tdata2 = tdata2u
# logger.debug(' max ' + str(np.max(tdata1)) + ' min ' + str(np.min(tdata1)))
# logger.debug(' max ' + str(np.max(tdata2)) + ' min ' + str(np.min(tdata2)))
# logger.debug('gamma ' + str(gamma))
# import sed3
# ed = sed3.show_slices(tdata1)
# ed = sed3.show_slices(tdata2)
del tdata1u
del tdata2u
del a1
del a2
# if np.any(tdata1 < 0) or np.any(tdata2 <0):
# logger.error("Problem with tlinks. Likelihood is < 0")
# if self.debug_images:
# self.__show_debug_tdata_images(tdata1, tdata2, suptitle="likelihood")
return tdata1, tdata2 | [
"def __ordered_values_by_indexes(self, data, inds):\n \"\"\"\n Return values (intensities) by indexes.\n\n Used for multiscale graph cut.\n data = [[0 1 1],\n [0 2 2],\n [0 2 2]]\n\n inds = [[0 1 2],\n [3 4 4],\n [5 4 4]]... | [
0.6722548007965088,
0.6709698438644409,
0.6678282618522644,
0.6651525497436523,
0.6602543592453003,
0.6581569910049438,
0.6538912057876587,
0.6536920666694641,
0.6516335606575012,
0.6476464867591858,
0.6469390988349915,
0.6459125280380249
] |
Compute nlinks grid from data shape information. For boundary penalties
are data (intensities) values are used.
ins: Default is None. Used for multiscale GC. This are indexes of
multiscale pixels. Next example shows one superpixel witn index 2.
inds = [
[1 2 2],
[3 2 2],
[4 5 6]]
boundary_penalties_fcn: is function with one argument - axis. It can
it can be used for setting penalty weights between neighbooring
pixels. | def __create_nlinks(self, data, inds=None, boundary_penalties_fcn=None):
"""
Compute nlinks grid from data shape information. For boundary penalties
are data (intensities) values are used.
ins: Default is None. Used for multiscale GC. This are indexes of
multiscale pixels. Next example shows one superpixel witn index 2.
inds = [
[1 2 2],
[3 2 2],
[4 5 6]]
boundary_penalties_fcn: is function with one argument - axis. It can
it can be used for setting penalty weights between neighbooring
pixels.
"""
# use the gerneral graph algorithm
# first, we construct the grid graph
start = time.time()
if inds is None:
inds = np.arange(data.size).reshape(data.shape)
# if not self.segparams['use_boundary_penalties'] and \
# boundary_penalties_fcn is None :
if boundary_penalties_fcn is None:
# This is faster for some specific format
edgx = np.c_[inds[:, :, :-1].ravel(), inds[:, :, 1:].ravel()]
edgy = np.c_[inds[:, :-1, :].ravel(), inds[:, 1:, :].ravel()]
edgz = np.c_[inds[:-1, :, :].ravel(), inds[1:, :, :].ravel()]
else:
logger.info("use_boundary_penalties")
bpw = self.segparams["boundary_penalties_weight"]
bpa = boundary_penalties_fcn(2)
# id1=inds[:, :, :-1].ravel()
edgx = np.c_[
inds[:, :, :-1].ravel(),
inds[:, :, 1:].ravel(),
# cc * np.ones(id1.shape)
bpw * bpa[:, :, 1:].ravel(),
]
bpa = boundary_penalties_fcn(1)
# id1 =inds[:, 1:, :].ravel()
edgy = np.c_[
inds[:, :-1, :].ravel(),
inds[:, 1:, :].ravel(),
# cc * np.ones(id1.shape)]
bpw * bpa[:, 1:, :].ravel(),
]
bpa = boundary_penalties_fcn(0)
# id1 = inds[1:, :, :].ravel()
edgz = np.c_[
inds[:-1, :, :].ravel(),
inds[1:, :, :].ravel(),
# cc * np.ones(id1.shape)]
bpw * bpa[1:, :, :].ravel(),
]
# import pdb; pdb.set_trace()
edges = np.vstack([edgx, edgy, edgz]).astype(np.int32)
# edges - seznam indexu hran, kteres spolu sousedi\
elapsed = time.time() - start
self.stats["_create_nlinks time"] = elapsed
logger.info("__create nlinks time " + str(elapsed))
return edges | [
"def __ms_npenalty_fcn(self, axis, mask, orig_shape):\n \"\"\"\n :param axis: direction of edge\n :param mask: 3d ndarray with ones where is fine resolution\n\n Neighboorhood penalty between small pixels should be smaller then in\n bigger tiles. This is the way how to set it.\n\n ... | [
0.7182618975639343,
0.7100909948348999,
0.699677050113678,
0.6798095107078552,
0.6643567681312561,
0.6640481948852539,
0.6634003520011902,
0.6623707413673401,
0.6592199802398682,
0.6557827591896057,
0.6554999947547913,
0.6552016139030457
] |
Use actual model to calculate similarity. If no input is given the last image is used.
:param data3d:
:param voxelsize:
:param seeds:
:param area_weight:
:param hard_constraints:
:param return_unariesalt:
:return: | def debug_get_reconstructed_similarity(
self,
data3d=None,
voxelsize=None,
seeds=None,
area_weight=1,
hard_constraints=True,
return_unariesalt=False,
):
"""
Use actual model to calculate similarity. If no input is given the last image is used.
:param data3d:
:param voxelsize:
:param seeds:
:param area_weight:
:param hard_constraints:
:param return_unariesalt:
:return:
"""
if data3d is None:
data3d = self.img
if voxelsize is None:
voxelsize = self.voxelsize
if seeds is None:
seeds = self.seeds
unariesalt = self.__create_tlinks(
data3d,
voxelsize,
# voxels1, voxels2,
seeds,
area_weight,
hard_constraints,
)
if return_unariesalt:
return unariesalt
else:
return self._reshape_unariesalt_to_similarity(unariesalt, data3d.shape) | [
"def fit_from_image(self, data, voxelsize, seeds, unique_cls):\n \"\"\"\n This Method allows computes feature vector and train model.\n\n :cls: list of index number of requested classes in seeds\n \"\"\"\n fvs, clsselected = self.features_from_image(data, voxelsize, seeds, unique_... | [
0.6625571846961975,
0.6577945351600647,
0.6545608043670654,
0.6472549438476562,
0.6417547464370728,
0.6331488490104675,
0.6312707662582397,
0.6286358833312988,
0.6269406080245972,
0.6256269812583923,
0.6230696439743042,
0.6227918863296509
] |
Show tlinks.
:param data3d: ndarray with input data
:param voxelsize:
:param seeds:
:param area_weight:
:param hard_constraints:
:param show:
:param bins: histogram bins number
:param slice_number:
:return: | def debug_show_reconstructed_similarity(
self,
data3d=None,
voxelsize=None,
seeds=None,
area_weight=1,
hard_constraints=True,
show=True,
bins=20,
slice_number=None,
):
"""
Show tlinks.
:param data3d: ndarray with input data
:param voxelsize:
:param seeds:
:param area_weight:
:param hard_constraints:
:param show:
:param bins: histogram bins number
:param slice_number:
:return:
"""
unariesalt = self.debug_get_reconstructed_similarity(
data3d,
voxelsize=voxelsize,
seeds=seeds,
area_weight=area_weight,
hard_constraints=hard_constraints,
return_unariesalt=True,
)
self._debug_show_unariesalt(
unariesalt, show=show, bins=bins, slice_number=slice_number
) | [
"def show_slices(data3d, contour=None, seeds=None, axis=0, slice_step=None,\r\n shape=None, show=True,\r\n flipH=False, flipV=False,\r\n first_slice_offset=0,\r\n first_slice_offset_to_see_seed_with_label=None,\r\n slice_number=None\r\n ... | [
0.771639347076416,
0.664085865020752,
0.6366907358169556,
0.6365001797676086,
0.6328928470611572,
0.6294004917144775,
0.6291933059692383,
0.6279113292694092,
0.6263575553894043,
0.6245708465576172,
0.6240610480308533,
0.6238716244697571
] |
Get info about the node. See pycut.inspect_node() for details.
Processing is done in temporary shape.
:param node_seed:
:return: node_unariesalt, node_neighboor_edges_and_weights, node_neighboor_seeds | def debug_inspect_node(self, node_msindex):
"""
Get info about the node. See pycut.inspect_node() for details.
Processing is done in temporary shape.
:param node_seed:
:return: node_unariesalt, node_neighboor_edges_and_weights, node_neighboor_seeds
"""
return inspect_node(self.nlinks, self.unariesalt2, self.msinds, node_msindex) | [
"def inspect_node(nlinks, unariesalt, msinds, node_msindex):\n \"\"\"\n Get information about one node in graph\n\n :param nlinks: neighboorhood edges\n :param unariesalt: weights\n :param msinds: indexes in 3d image\n :param node_msindex: msindex of selected node. See get_node_msindex()\n :ret... | [
0.7683572173118591,
0.7625840902328491,
0.6913125514984131,
0.6483857035636902,
0.6416175961494446,
0.6361604332923889,
0.6354682445526123,
0.6320913434028625,
0.6299605369567871,
0.6297000646591187,
0.6281426548957825,
0.6263656616210938
] |
Call after segmentation to see selected node neighborhood.
User have to select one node by click.
:return: | def debug_interactive_inspect_node(self):
"""
Call after segmentation to see selected node neighborhood.
User have to select one node by click.
:return:
"""
if (
np.sum(
np.abs(
np.asarray(self.msinds.shape) - np.asarray(self.segmentation.shape)
)
)
== 0
):
segmentation = self.segmentation
else:
segmentation = self.temp_msgc_resized_segmentation
logger.info("Click to select one voxel of interest")
import sed3
ed = sed3.sed3(self.msinds, contour=segmentation == 0)
ed.show()
edseeds = ed.seeds
node_msindex = get_node_msindex(self.msinds, edseeds)
node_unariesalt, node_neighboor_edges_and_weights, node_neighboor_seeds = self.debug_inspect_node(
node_msindex
)
import sed3
ed = sed3.sed3(
self.msinds, contour=segmentation == 0, seeds=node_neighboor_seeds
)
ed.show()
return (
node_unariesalt,
node_neighboor_edges_and_weights,
node_neighboor_seeds,
node_msindex,
) | [
"def OnNodeSelected(self, event):\n \"\"\"We have selected a node with the list control, tell the world\"\"\"\n try:\n node = self.sorted[event.GetIndex()]\n except IndexError, err:\n log.warn(_('Invalid index in node selected: %(index)s'),\n index=even... | [
0.7131602764129639,
0.7058137059211731,
0.704730212688446,
0.7002614140510559,
0.697652280330658,
0.6866844892501831,
0.6853585839271545,
0.6828123927116394,
0.6754629611968994,
0.6754100322723389,
0.6728068590164185,
0.6713006496429443
] |
Setting of data.
You need set seeds if you want use hard_constraints. | def _ssgc_prepare_data_and_run_computation(
self,
# voxels1, voxels2,
hard_constraints=True,
area_weight=1,
):
"""
Setting of data.
You need set seeds if you want use hard_constraints.
"""
# from PyQt4.QtCore import pyqtRemoveInputHook
# pyqtRemoveInputHook()
# import pdb; pdb.set_trace() # BREAKPOINT
unariesalt = self.__create_tlinks(
self.img,
self.voxelsize,
# voxels1, voxels2,
self.seeds,
area_weight,
hard_constraints,
)
# některém testu organ semgmentation dosahují unaries -15. což je podiné
# stačí vyhodit print před if a je to vidět
logger.debug("unaries %.3g , %.3g" % (np.max(unariesalt), np.min(unariesalt)))
# create potts pairwise
# pairwiseAlpha = -10
pairwise = -(np.eye(2) - 1)
pairwise = (self.segparams["pairwise_alpha"] * pairwise).astype(np.int32)
# pairwise = np.array([[0,30],[30,0]]).astype(np.int32)
# print pairwise
self.iparams = {}
if self.segparams["use_boundary_penalties"]:
sigma = self.segparams["boundary_penalties_sigma"]
# set boundary penalties function
# Default are penalties based on intensity differences
boundary_penalties_fcn = lambda ax: self._boundary_penalties_array(
axis=ax, sigma=sigma
)
else:
boundary_penalties_fcn = None
nlinks = self.__create_nlinks(
self.img, boundary_penalties_fcn=boundary_penalties_fcn
)
self.stats["tlinks shape"].append(unariesalt.reshape(-1, 2).shape)
self.stats["nlinks shape"].append(nlinks.shape)
# we flatten the unaries
# result_graph = cut_from_graph(nlinks, unaries.reshape(-1, 2),
# pairwise)
start = time.time()
if self.debug_images:
self._debug_show_unariesalt(unariesalt)
result_graph = pygco.cut_from_graph(nlinks, unariesalt.reshape(-1, 2), pairwise)
elapsed = time.time() - start
self.stats["gc time"] = elapsed
result_labeling = result_graph.reshape(self.img.shape)
return result_labeling | [
"def __set_hard_hard_constraints(self, tdata1, tdata2, seeds):\n \"\"\"\n it works with seed labels:\n 0: nothing\n 1: object 1 - full seeds\n 2: object 2 - full seeds\n 3: object 1 - not a training seeds\n 4: object 2 - not a training seeds\n \"\"\"\n ... | [
0.7334316968917847,
0.7288636565208435,
0.7227749824523926,
0.7101259231567383,
0.7063561677932739,
0.7062025666236877,
0.7042734026908875,
0.7031860947608948,
0.6948578357696533,
0.6918073892593384,
0.6902747750282288,
0.6894993185997009
] |
Function resize input data to specific shape.
:param data: input 3d array-like data
:param shape: shape of output data
:param zoom: zoom is used for back compatibility
:mode: default is 'nearest' | def resize_to_shape(data, shape, zoom=None, mode="nearest", order=0):
"""
Function resize input data to specific shape.
:param data: input 3d array-like data
:param shape: shape of output data
:param zoom: zoom is used for back compatibility
:mode: default is 'nearest'
"""
# @TODO remove old code in except part
# TODO use function from library in future
try:
# rint 'pred vyjimkou'
# aise Exception ('test without skimage')
# rint 'za vyjimkou'
import skimage
import skimage.transform
# Now we need reshape seeds and segmentation to original size
# with warnings.catch_warnings():
# warnings.filterwarnings("ignore", ".*'constant', will be changed to.*")
segm_orig_scale = skimage.transform.resize(
data, shape, order=0, preserve_range=True, mode="reflect"
)
segmentation = segm_orig_scale
logger.debug("resize to orig with skimage")
except:
if zoom is None:
zoom = shape / np.asarray(data.shape).astype(np.double)
segmentation = resize_to_shape_with_zoom(
data, zoom=zoom, mode=mode, order=order
)
return segmentation | [
"def resize_to_shape(data, shape, zoom=None, mode='nearest', order=0):\n \"\"\"\n Function resize input data to specific shape.\n\n :param data: input 3d array-like data\n :param shape: shape of output data\n :param zoom: zoom is used for back compatibility\n :mode: default is 'nearest'\n \"\"\... | [
0.7604760527610779,
0.7587894797325134,
0.7159501314163208,
0.7097681760787964,
0.6770105957984924,
0.6759569048881531,
0.666746199131012,
0.6527373790740967,
0.6487169861793518,
0.6436185240745544,
0.6436010599136353,
0.6350785493850708
] |
Smart zoom for sparse matrix. If there is resize to bigger resolution
thin line of label could be lost. This function prefers labels larger
then zero. If there is only one small voxel in larger volume with zeros
it is selected. | def seed_zoom(seeds, zoom):
"""
Smart zoom for sparse matrix. If there is resize to bigger resolution
thin line of label could be lost. This function prefers labels larger
then zero. If there is only one small voxel in larger volume with zeros
it is selected.
"""
# import scipy
# loseeds=seeds
labels = np.unique(seeds)
# remove first label - 0
labels = np.delete(labels, 0)
# @TODO smart interpolation for seeds in one block
# loseeds = scipy.ndimage.interpolation.zoom(
# seeds, zoom, order=0)
loshape = np.ceil(np.array(seeds.shape) * 1.0 / zoom).astype(np.int)
loseeds = np.zeros(loshape, dtype=np.int8)
loseeds = loseeds.astype(np.int8)
for label in labels:
a, b, c = np.where(seeds == label)
loa = np.round(a // zoom)
lob = np.round(b // zoom)
loc = np.round(c // zoom)
# loseeds = np.zeros(loshape)
loseeds[loa, lob, loc] += label
# this is to detect conflict seeds
loseeds[loseeds > label] = 100
# remove conflict seeds
loseeds[loseeds > 99] = 0
# import py3DSeedEditor
# ped = py3DSeedEditor.py3DSeedEditor(loseeds)
# ped.show()
return loseeds | [
"def _zoom(scale:uniform=1.0, row_pct:uniform=0.5, col_pct:uniform=0.5):\n \"Zoom image by `scale`. `row_pct`,`col_pct` select focal point of zoom.\"\n s = 1-1/scale\n col_c = s * (2*col_pct - 1)\n row_c = s * (2*row_pct - 1)\n return _get_zoom_mat(1/scale, 1/scale, col_c, row_c)",
"def zoom(self, ... | [
0.6950457692146301,
0.6948485374450684,
0.6808585524559021,
0.678534984588623,
0.676499605178833,
0.6748631596565247,
0.6726201772689819,
0.6691378951072693,
0.6671112775802612,
0.6661582589149475,
0.6641716957092285,
0.6616668701171875
] |
Zoom data to specific shape. | def zoom_to_shape(data, shape, dtype=None):
"""
Zoom data to specific shape.
"""
import scipy
import scipy.ndimage
zoomd = np.array(shape) / np.array(data.shape, dtype=np.double)
import warnings
datares = scipy.ndimage.interpolation.zoom(data, zoomd, order=0, mode="reflect")
if datares.shape != shape:
logger.warning("Zoom with different output shape")
dataout = np.zeros(shape, dtype=dtype)
shpmin = np.minimum(dataout.shape, shape)
dataout[: shpmin[0], : shpmin[1], : shpmin[2]] = datares[
: shpmin[0], : shpmin[1], : shpmin[2]
]
return datares | [
"def zoom(self, factor, order=1, verbose=True):\n \"\"\"Zoom the data array using spline interpolation of the requested order.\n\n The number of points along each axis is increased by factor.\n See `scipy ndimage`__ for more info.\n\n __ http://docs.scipy.org/doc/scipy/reference/\n ... | [
0.7957965135574341,
0.7436215281486511,
0.7239788174629211,
0.7158556580543518,
0.7105779051780701,
0.7090069055557251,
0.708440899848938,
0.7084274888038635,
0.7077401280403137,
0.707372784614563,
0.7045493125915527,
0.7032337188720703
] |
Crop the data.
crop(data, crinfo)
:param crinfo: min and max for each axis - [[minX, maxX], [minY, maxY], [minZ, maxZ]] | def crop(data, crinfo):
"""
Crop the data.
crop(data, crinfo)
:param crinfo: min and max for each axis - [[minX, maxX], [minY, maxY], [minZ, maxZ]]
"""
crinfo = fix_crinfo(crinfo)
return data[
__int_or_none(crinfo[0][0]) : __int_or_none(crinfo[0][1]),
__int_or_none(crinfo[1][0]) : __int_or_none(crinfo[1][1]),
__int_or_none(crinfo[2][0]) : __int_or_none(crinfo[2][1]),
] | [
"def uncrop(data, crinfo, orig_shape, resize=False, outside_mode=\"constant\", cval=0):\n \"\"\"\n Put some boundary to input image.\n\n\n :param data: input data\n :param crinfo: array with minimum and maximum index along each axis\n [[minX, maxX],[minY, maxY],[minZ, maxZ]]. If crinfo is None, t... | [
0.7339417338371277,
0.7194818258285522,
0.7174306511878967,
0.7124550938606262,
0.7115015387535095,
0.7108666896820068,
0.7031501531600952,
0.7022998929023743,
0.6970390677452087,
0.6903190612792969,
0.6898860335350037,
0.6828916072845459
] |
Combine two crinfos. First used is crinfo1, second used is crinfo2. | def combinecrinfo(crinfo1, crinfo2):
"""
Combine two crinfos. First used is crinfo1, second used is crinfo2.
"""
crinfo1 = fix_crinfo(crinfo1)
crinfo2 = fix_crinfo(crinfo2)
crinfo = [
[crinfo1[0][0] + crinfo2[0][0], crinfo1[0][0] + crinfo2[0][1]],
[crinfo1[1][0] + crinfo2[1][0], crinfo1[1][0] + crinfo2[1][1]],
[crinfo1[2][0] + crinfo2[2][0], crinfo1[2][0] + crinfo2[2][1]],
]
return crinfo | [
"static public int concatCrc(int crc1, int crc2, int order) {\n // Calculate CRC of crc1 + order's 0\n int crcForCrc1 = crc1;\n int orderRemained = order;\n\n // Fast transforming CRCs for adding 0 to the end of the byte array by table\n // look-up\n for (LookupTable lookupTable : lookupTables) {\... | [
0.7125424146652222,
0.7048804759979248,
0.6844689846038818,
0.6780837774276733,
0.6704990863800049,
0.6569040417671204,
0.6556092500686646,
0.6535033583641052,
0.6515312194824219,
0.6504814028739929,
0.6501571536064148,
0.6478806138038635
] |
Create crinfo of minimum orthogonal nonzero block in input data.
:param data: input data
:param margin: add margin to minimum block
:return: | def crinfo_from_specific_data(data, margin=0):
"""
Create crinfo of minimum orthogonal nonzero block in input data.
:param data: input data
:param margin: add margin to minimum block
:return:
"""
# hledáme automatický ořez, nonzero dá indexy
logger.debug("crinfo")
logger.debug(str(margin))
nzi = np.nonzero(data)
logger.debug(str(nzi))
if np.isscalar(margin):
margin = [margin] * 3
x1 = np.min(nzi[0]) - margin[0]
x2 = np.max(nzi[0]) + margin[0] + 1
y1 = np.min(nzi[1]) - margin[0]
y2 = np.max(nzi[1]) + margin[0] + 1
z1 = np.min(nzi[2]) - margin[0]
z2 = np.max(nzi[2]) + margin[0] + 1
# ošetření mezí polí
if x1 < 0:
x1 = 0
if y1 < 0:
y1 = 0
if z1 < 0:
z1 = 0
if x2 > data.shape[0]:
x2 = data.shape[0] - 1
if y2 > data.shape[1]:
y2 = data.shape[1] - 1
if z2 > data.shape[2]:
z2 = data.shape[2] - 1
# ořez
crinfo = [[x1, x2], [y1, y2], [z1, z2]]
return crinfo | [
"def get_minimum_size(self, data):\n \"\"\"\n Minimum height is the total height + margins, minimum width\n is the largest width.\n \"\"\"\n min_width = 0\n height = 0\n for element in self.elements:\n size = element.get_minimum_size(data)\n min... | [
0.681165874004364,
0.6734849214553833,
0.6568872332572937,
0.6547254323959351,
0.6481010317802429,
0.6388972997665405,
0.638611376285553,
0.6367385387420654,
0.6363102793693542,
0.6296578049659729,
0.6294127702713013,
0.6288889050483704
] |
Put some boundary to input image.
:param data: input data
:param crinfo: array with minimum and maximum index along each axis
[[minX, maxX],[minY, maxY],[minZ, maxZ]]. If crinfo is None, the whole input image is placed into [0, 0, 0].
If crinfo is just series of three numbers, it is used as an initial point for input image placement.
:param orig_shape: shape of uncropped image
:param resize: True or False (default). Usefull if the data.shape does not fit to crinfo shape.
:param outside_mode: 'constant', 'nearest'
:return: | def uncrop(data, crinfo, orig_shape, resize=False, outside_mode="constant", cval=0):
"""
Put some boundary to input image.
:param data: input data
:param crinfo: array with minimum and maximum index along each axis
[[minX, maxX],[minY, maxY],[minZ, maxZ]]. If crinfo is None, the whole input image is placed into [0, 0, 0].
If crinfo is just series of three numbers, it is used as an initial point for input image placement.
:param orig_shape: shape of uncropped image
:param resize: True or False (default). Usefull if the data.shape does not fit to crinfo shape.
:param outside_mode: 'constant', 'nearest'
:return:
"""
if crinfo is None:
crinfo = list(zip([0] * data.ndim, orig_shape))
elif np.asarray(crinfo).size == data.ndim:
crinfo = list(zip(crinfo, np.asarray(crinfo) + data.shape))
crinfo = fix_crinfo(crinfo)
data_out = np.ones(orig_shape, dtype=data.dtype) * cval
# print 'uncrop ', crinfo
# print orig_shape
# print data.shape
if resize:
data = resize_to_shape(data, crinfo[:, 1] - crinfo[:, 0])
startx = np.round(crinfo[0][0]).astype(int)
starty = np.round(crinfo[1][0]).astype(int)
startz = np.round(crinfo[2][0]).astype(int)
data_out[
# np.round(crinfo[0][0]).astype(int):np.round(crinfo[0][1]).astype(int)+1,
# np.round(crinfo[1][0]).astype(int):np.round(crinfo[1][1]).astype(int)+1,
# np.round(crinfo[2][0]).astype(int):np.round(crinfo[2][1]).astype(int)+1
startx : startx + data.shape[0],
starty : starty + data.shape[1],
startz : startz + data.shape[2],
] = data
if outside_mode == "nearest":
# for ax in range(data.ndims):
# ax = 0
# copy border slice to pixels out of boundary - the higher part
for ax in range(data.ndim):
# the part under the crop
start = np.round(crinfo[ax][0]).astype(int)
slices = [slice(None), slice(None), slice(None)]
slices[ax] = start
repeated_slice = np.expand_dims(data_out[slices], ax)
append_sz = start
if append_sz > 0:
tile0 = np.repeat(repeated_slice, append_sz, axis=ax)
slices = [slice(None), slice(None), slice(None)]
slices[ax] = slice(None, start)
# data_out[start + data.shape[ax] : , :, :] = tile0
data_out[slices] = tile0
# plt.imshow(np.squeeze(repeated_slice))
# plt.show()
# the part over the crop
start = np.round(crinfo[ax][0]).astype(int)
slices = [slice(None), slice(None), slice(None)]
slices[ax] = start + data.shape[ax] - 1
repeated_slice = np.expand_dims(data_out[slices], ax)
append_sz = data_out.shape[ax] - (start + data.shape[ax])
if append_sz > 0:
tile0 = np.repeat(repeated_slice, append_sz, axis=ax)
slices = [slice(None), slice(None), slice(None)]
slices[ax] = slice(start + data.shape[ax], None)
# data_out[start + data.shape[ax] : , :, :] = tile0
data_out[slices] = tile0
# plt.imshow(np.squeeze(repeated_slice))
# plt.show()
return data_out | [
"def crop(data, crinfo):\n \"\"\"\n Crop the data.\n\n crop(data, crinfo)\n\n :param crinfo: min and max for each axis - [[minX, maxX], [minY, maxY], [minZ, maxZ]]\n\n \"\"\"\n crinfo = fix_crinfo(crinfo)\n return data[\n __int_or_none(crinfo[0][0]) : __int_or_none(crinfo[0][1]),\n ... | [
0.6718054413795471,
0.6605711579322815,
0.6366471648216248,
0.6345922350883484,
0.6334717869758606,
0.6292850971221924,
0.627955973148346,
0.6246779561042786,
0.6235324740409851,
0.6230271458625793,
0.622809886932373,
0.622344434261322
] |
Function recognize order of crinfo and convert it to proper format. | def fix_crinfo(crinfo, to="axis"):
"""
Function recognize order of crinfo and convert it to proper format.
"""
crinfo = np.asarray(crinfo)
if crinfo.shape[0] == 2:
crinfo = crinfo.T
return crinfo | [
"def combinecrinfo(crinfo1, crinfo2):\n \"\"\"\n Combine two crinfos. First used is crinfo1, second used is crinfo2.\n \"\"\"\n crinfo1 = fix_crinfo(crinfo1)\n crinfo2 = fix_crinfo(crinfo2)\n\n crinfo = [\n [crinfo1[0][0] + crinfo2[0][0], crinfo1[0][0] + crinfo2[0][1]],\n [crinfo1[1]... | [
0.7453998327255249,
0.6900262832641602,
0.6783666610717773,
0.6655680537223816,
0.6643850803375244,
0.6607131361961365,
0.6585497856140137,
0.6559640169143677,
0.6520832777023315,
0.6511232852935791,
0.6503932476043701,
0.6451404094696045
] |
Get list of grid edges
:param shape:
:param inds:
:param return_directions:
:return: | def grid_edges(shape, inds=None, return_directions=True):
"""
Get list of grid edges
:param shape:
:param inds:
:param return_directions:
:return:
"""
if inds is None:
inds = np.arange(np.prod(shape)).reshape(shape)
# if not self.segparams['use_boundary_penalties'] and \
# boundary_penalties_fcn is None :
if len(shape) == 2:
edgx = np.c_[inds[:, :-1].ravel(), inds[:, 1:].ravel()]
edgy = np.c_[inds[:-1, :].ravel(), inds[1:, :].ravel()]
edges = [edgx, edgy]
directions = [
np.ones([edgx.shape[0]], dtype=np.int8) * 0,
np.ones([edgy.shape[0]], dtype=np.int8) * 1,
]
elif len(shape) == 3:
# This is faster for some specific format
edgx = np.c_[inds[:, :, :-1].ravel(), inds[:, :, 1:].ravel()]
edgy = np.c_[inds[:, :-1, :].ravel(), inds[:, 1:, :].ravel()]
edgz = np.c_[inds[:-1, :, :].ravel(), inds[1:, :, :].ravel()]
edges = [edgx, edgy, edgz]
else:
logger.error("Expected 2D or 3D data")
# for all edges along first direction put 0, for second direction put 1, for third direction put 3
if return_directions:
directions = []
for idirection in range(len(shape)):
directions.append(
np.ones([edges[idirection].shape[0]], dtype=np.int8) * idirection
)
edges = np.concatenate(edges)
if return_directions:
edge_dir = np.concatenate(directions)
return edges, edge_dir
else:
return edges | [
"def gen_grid_2d(shape, voxelsize):\n \"\"\"\n Generate list of edges for a base grid.\n \"\"\"\n nr, nc = shape\n nrm1, ncm1 = nr - 1, nc - 1\n # sh = nm.asarray(shape)\n # calculate number of edges, in 2D: (nrows * (ncols - 1)) + ((nrows - 1) * ncols)\n nedges = 0\n for direction in ran... | [
0.6559106707572937,
0.6455737948417664,
0.645016610622406,
0.6407939791679382,
0.6346331238746643,
0.6286357045173645,
0.6279879808425903,
0.6273221969604492,
0.6237670183181763,
0.622414231300354,
0.6219837665557861,
0.6216621398925781
] |
Generate list of edges for a base grid. | def gen_grid_2d(shape, voxelsize):
"""
Generate list of edges for a base grid.
"""
nr, nc = shape
nrm1, ncm1 = nr - 1, nc - 1
# sh = nm.asarray(shape)
# calculate number of edges, in 2D: (nrows * (ncols - 1)) + ((nrows - 1) * ncols)
nedges = 0
for direction in range(len(shape)):
sh = copy.copy(list(shape))
sh[direction] += -1
nedges += nm.prod(sh)
nedges_old = ncm1 * nr + nrm1 * nc
edges = nm.zeros((nedges, 2), dtype=nm.int16)
edge_dir = nm.zeros((ncm1 * nr + nrm1 * nc,), dtype=nm.bool)
nodes = nm.zeros((nm.prod(shape), 3), dtype=nm.float32)
# edges
idx = 0
row = nm.zeros((ncm1, 2), dtype=nm.int16)
row[:, 0] = nm.arange(ncm1)
row[:, 1] = nm.arange(ncm1) + 1
for ii in range(nr):
edges[slice(idx, idx + ncm1), :] = row + nc * ii
idx += ncm1
edge_dir[slice(0, idx)] = 0 # horizontal dir
idx0 = idx
col = nm.zeros((nrm1, 2), dtype=nm.int16)
col[:, 0] = nm.arange(nrm1) * nc
col[:, 1] = nm.arange(nrm1) * nc + nc
for ii in range(nc):
edges[slice(idx, idx + nrm1), :] = col + ii
idx += nrm1
edge_dir[slice(idx0, idx)] = 1 # vertical dir
# nodes
idx = 0
row = nm.zeros((nc, 3), dtype=nm.float32)
row[:, 0] = voxelsize[0] * (nm.arange(nc) + 0.5)
row[:, 1] = voxelsize[1] * 0.5
for ii in range(nr):
nodes[slice(idx, idx + nc), :] = row
row[:, 1] += voxelsize[1]
idx += nc
return nodes, edges, edge_dir | [
"def create_edges(self):\n \"\"\"Set up edge-node and edge-cell relations.\n \"\"\"\n # Reshape into individual edges.\n # Sort the columns to make it possible for `unique()` to identify\n # individual edges.\n s = self.idx_hierarchy.shape\n a = numpy.sort(self.idx_h... | [
0.7401100993156433,
0.7308559417724609,
0.7190775871276855,
0.707381546497345,
0.7058398723602295,
0.7043352723121643,
0.7015905976295471,
0.7009677886962891,
0.6896178722381592,
0.6852476596832275,
0.6844310164451599,
0.6842830777168274
] |
Write nodes and edges to VTK file
:param fname: VTK filename
:param nodes:
:param edges:
:param node_flag: set if this node is really used in output
:param edge_flag: set if this flag is used in output
:return: | def write_grid_to_vtk(fname, nodes, edges, node_flag=None, edge_flag=None):
"""
Write nodes and edges to VTK file
:param fname: VTK filename
:param nodes:
:param edges:
:param node_flag: set if this node is really used in output
:param edge_flag: set if this flag is used in output
:return:
"""
if node_flag is None:
node_flag = np.ones([nodes.shape[0]], dtype=np.bool)
if edge_flag is None:
edge_flag = np.ones([edges.shape[0]], dtype=np.bool)
nodes = make_nodes_3d(nodes)
f = open(fname, "w")
f.write("# vtk DataFile Version 2.6\n")
f.write("output file\nASCII\nDATASET UNSTRUCTURED_GRID\n")
idxs = nm.where(node_flag > 0)[0]
nnd = len(idxs)
aux = -nm.ones(node_flag.shape, dtype=nm.int32)
aux[idxs] = nm.arange(nnd, dtype=nm.int32)
f.write("\nPOINTS %d float\n" % nnd)
for ndi in idxs:
f.write("%.6f %.6f %.6f\n" % tuple(nodes[ndi, :]))
idxs = nm.where(edge_flag > 0)[0]
ned = len(idxs)
f.write("\nCELLS %d %d\n" % (ned, ned * 3))
for edi in idxs:
f.write("2 %d %d\n" % tuple(aux[edges[edi, :]]))
f.write("\nCELL_TYPES %d\n" % ned)
for edi in idxs:
f.write("3\n") | [
"def write_edges(\n edges: Mapping[str, Any],\n filename: str,\n jsonlines: bool = False,\n gzipflag: bool = False,\n yaml: bool = False,\n):\n \"\"\"Write edges to file\n\n Args:\n edges (Mapping[str, Any]): in edges JSON Schema format\n filename (str): filename to write\n ... | [
0.7161726355552673,
0.6993839740753174,
0.6887747049331665,
0.6847518086433411,
0.6826242208480835,
0.6810848712921143,
0.6732126474380493,
0.6722975969314575,
0.6669342517852783,
0.6657159924507141,
0.6601456999778748,
0.6593608260154724
] |
Add new nodes at the end of the list. | def add_nodes(self, coors, node_low_or_high=None):
"""
Add new nodes at the end of the list.
"""
last = self.lastnode
if type(coors) is nm.ndarray:
if len(coors.shape) == 1:
coors = coors.reshape((1, coors.size))
nadd = coors.shape[0]
idx = slice(last, last + nadd)
else:
nadd = 1
idx = self.lastnode
right_dimension = coors.shape[1]
self.nodes[idx, :right_dimension] = coors
self.node_flag[idx] = True
self.lastnode += nadd
self.nnodes += nadd | [
"def append(self, *nodes: Union[AbstractNode, str]) -> None:\n \"\"\"Append new nodes after last child node.\"\"\"\n node = _to_node_list(nodes)\n self.appendChild(node)",
"function(list){\n\t\t\tvar index = 0;\n\t\t\twhile(index < list.length) {\n\t\t\t\tvar node = list[index],\n\t\t\t\t\tch... | [
0.7512038350105286,
0.7270193696022034,
0.7220104932785034,
0.7022676467895508,
0.7018536329269409,
0.7017405033111572,
0.7012209892272949,
0.7008579969406128,
0.6987891793251038,
0.6986714005470276,
0.6984930634498596,
0.6933650374412537
] |
Add new edges at the end of the list.
:param edge_direction: direction flag
:param edge_group: describes group of edges from same low super node and same direction
:param edge_low_or_high: zero for low to low resolution, one for high to high or high to low resolution.
It is used to set weight from weight table. | def add_edges(self, conn, edge_direction, edge_group=None, edge_low_or_high=None):
"""
Add new edges at the end of the list.
:param edge_direction: direction flag
:param edge_group: describes group of edges from same low super node and same direction
:param edge_low_or_high: zero for low to low resolution, one for high to high or high to low resolution.
It is used to set weight from weight table.
"""
last = self.lastedge
if type(conn) is nm.ndarray:
nadd = conn.shape[0]
idx = slice(last, last + nadd)
if edge_group is None:
edge_group = nm.arange(nadd) + last
else:
nadd = 1
idx = nm.array([last])
conn = nm.array(conn).reshape((1, 2))
if edge_group is None:
edge_group = idx
self.edges[idx, :] = conn
self.edge_flag[idx] = True
# t_start0 = time.time()
# self.edge_flag_idx.extend(list(range(idx.start, idx.stop)))
# self.stats["t split 082"] += time.time() - t_start0
self.edge_dir[idx] = edge_direction
self.edge_group[idx] = edge_group
# TODO change this just to array of low_or_high_resolution
if edge_low_or_high is not None and self._edge_weight_table is not None:
self.edges_weights[idx] = self._edge_weight_table[
edge_low_or_high, edge_direction
]
self.lastedge += nadd
self.nedges += nadd | [
"def add_edge(self, head_id, tail_id, edge_data=1, create_nodes=True):\n \"\"\"\n Adds a directed edge going from head_id to tail_id.\n Arbitrary data can be attached to the edge via edge_data.\n It may create the nodes if adding edges between nonexisting ones.\n\n :param head_id:... | [
0.669408917427063,
0.6667589545249939,
0.6647486686706543,
0.658531665802002,
0.656326949596405,
0.6461688280105591,
0.6458262801170349,
0.6382313370704651,
0.6357985138893127,
0.6345136165618896,
0.633725643157959,
0.6332263350486755
] |
Reconnect edges.
:param ndid: id of low resolution edges
:param nsplit: number of split
:param idxs: indexes of low resolution
:param sr_tab:
:param ndoffset:
:param ed_remove:
:param into_or_from: if zero, connection of input edges is done. If one, connection of output edges
is performed.
:return: | def _edge_group_substitution(
self, ndid, nsplit, idxs, sr_tab, ndoffset, ed_remove, into_or_from
):
"""
Reconnect edges.
:param ndid: id of low resolution edges
:param nsplit: number of split
:param idxs: indexes of low resolution
:param sr_tab:
:param ndoffset:
:param ed_remove:
:param into_or_from: if zero, connection of input edges is done. If one, connection of output edges
is performed.
:return:
"""
# this is useful for type(idxs) == np.ndarray
eidxs = idxs[nm.where(self.edges[idxs, 1 - into_or_from] == ndid)[0]]
# selected_edges = self.edges[idxs, 1 - into_or_from]
# selected_edges == ndid
# whre = nm.where(self.edges[idxs, 1 - into_or_from] == ndid)
# whre0 = (nm.where(self.edges[idxs, 1 - into_or_from] == ndid) == ndid)[0]
# eidxs = [idxs[i] for i in idxs]
for igrp in self.edges_by_group(eidxs):
if igrp.shape[0] > 1:
# high resolution block to high resolution block
# all directions are the same
directions = self.edge_dir[igrp[0]]
edge_indexes = sr_tab[directions, :].T.flatten() + ndoffset
# debug code
# if len(igrp) != len(edge_indexes):
# print("Problem ")
self.edges[igrp, 1] = edge_indexes
if self._edge_weight_table is not None:
self.edges_weights[igrp] = self._edge_weight_table[1, directions]
else:
# low res block to hi res block, if into_or_from is set to 0
# hig res block to low res block, if into_or_from is set to 1
ed_remove.append(igrp[0])
# number of new edges is equal to number of pixels on one side of the box (in 2D and D too)
nnewed = np.power(nsplit, self.data.ndim - 1)
muleidxs = nm.tile(igrp, nnewed)
# copy the low-res edge multipletime
newed = self.edges[muleidxs, :]
neweddir = self.edge_dir[muleidxs]
local_node_ids = sr_tab[
self.edge_dir[igrp] + self.data.ndim * into_or_from, :
].T.flatten()
# first or second (the actual) node id is substitued by new node indexes
newed[:, 1 - into_or_from] = local_node_ids + ndoffset
if self._edge_weight_table is not None:
self.add_edges(
newed, neweddir, self.edge_group[igrp], edge_low_or_high=1
)
else:
self.add_edges(
newed, neweddir, self.edge_group[igrp], edge_low_or_high=None
)
return ed_remove | [
"def reconnectPorts(root: LNode, srcPort: LPort,\n oldSplits: List[Tuple[LNode, LEdge]],\n newSplitNode: LNode):\n \"\"\"\n :ivar root: top LNode instance in which are nodes and links stored\n :ivar srcPort: for SLICE it is port which is connected to input of SLICE node\... | [
0.7014980912208557,
0.6444256901741028,
0.6356461644172668,
0.6267616152763367,
0.6257805824279785,
0.6197803616523743,
0.6172372698783875,
0.6164812445640564,
0.6139699816703796,
0.6091693043708801,
0.6076071858406067,
0.6074635982513428
] |
Run first step of algorithm. Next step is split_voxels
:param vtk_filename:
:return: | def generate_base_grid(self, vtk_filename=None):
"""
Run first step of algorithm. Next step is split_voxels
:param vtk_filename:
:return:
"""
nd, ed, ed_dir = self.gen_grid_fcn(self.data.shape, self.voxelsize)
self.add_nodes(nd)
self.add_edges(ed, ed_dir, edge_low_or_high=0)
if vtk_filename is not None:
self.write_vtk(vtk_filename) | [
"def split_voxels(self, vtk_filename=None):\n \"\"\"\n Second step of algorithm\n :return:()\n \"\"\"\n self.cache = {}\n self.stats[\"t graph 10\"] = time.time() - self.start_time\n self.msi = MultiscaleArray(self.data.shape, block_size=self.nsplit)\n\n # old... | [
0.7477437257766724,
0.6800397038459778,
0.672272264957428,
0.6702258586883545,
0.6685526967048645,
0.6679937839508057,
0.6633126139640808,
0.6589624881744385,
0.6504949927330017,
0.6503692865371704,
0.6481964588165283,
0.6472215056419373
] |
Second step of algorithm
:return:() | def split_voxels(self, vtk_filename=None):
"""
Second step of algorithm
:return:()
"""
self.cache = {}
self.stats["t graph 10"] = time.time() - self.start_time
self.msi = MultiscaleArray(self.data.shape, block_size=self.nsplit)
# old implementation
# idxs = nm.where(self.data)
# nr, nc = self.data.shape
# for k, (ir, ic) in enumerate(zip(*idxs)):
# ndid = ic + ir * nc
# self.split_voxel(ndid, self.nsplit)
# new_implementation
# for ndid in np.flatnonzero(self.data):
# self.split_voxel(ndid, self.nsplit)
# even newer implementation
self.stats["t graph 11"] = time.time() - self.start_time
for ndid, val in enumerate(self.data.ravel()):
t_split_start = time.time()
if val == 0:
if self.compute_msindex:
self.msi.set_block_lowres(ndid, ndid)
self.stats["t graph low"] += time.time() - t_split_start
else:
self.split_voxel(ndid)
self.stats["t graph high"] += time.time() - t_split_start
self.stats["t graph 13"] = time.time() - self.start_time
self.finish()
if vtk_filename is not None:
self.write_vtk(vtk_filename)
self.stats["t graph 14"] = time.time() - self.start_time | [
"def step2(self):\n \"\"\"step2() maps double suffices to single ones.\n so -ization ( = -ize plus -ation) maps to -ize etc. note that the\n string before the suffix must give m() > 0.\n \"\"\"\n if self.b[self.k - 1] == \"a\":\n if self.ends(\"ational\"):\n ... | [
0.7399905920028687,
0.7189586758613586,
0.6995337605476379,
0.6849796772003174,
0.6814959645271301,
0.6771830320358276,
0.6719550490379333,
0.6714085340499878,
0.6711153388023376,
0.654771625995636,
0.653181791305542,
0.6484779119491577
] |
Multiply values in block | def mul_block(self, index, val):
"""Multiply values in block"""
self._prepare_cache_slice(index)
self.msinds[self.cache_slice] *= val | [
"function blockMult(a, b, rows, cols) {\n // For small matrices, resort to naive multiplication.\n if (rows <= 512 || cols <= 512) {\n return a.mmul(b); // a is equivalent to this\n }\n\n ... | [
0.7522327899932861,
0.7504237294197083,
0.7500613927841187,
0.7392536401748657,
0.7338317036628723,
0.7292303442955017,
0.7271780371665955,
0.7201430201530457,
0.7198589444160461,
0.7174651622772217,
0.7109858989715576,
0.7057550549507141
] |
Tool to make simple feature functions take features from feature array by seeds.
:param fv: ndarray with lineariezed feature. It's shape is MxN, where M is number of image pixels and N is number
of features
:param seeds: ndarray with seeds. Does not to be linear.
:param unique_cls: number of used seeds clases. Like [1, 2]
:return: fv_selection, seeds_selection - selection from feature vector and selection from seeds | def select_from_fv_by_seeds(fv, seeds, unique_cls):
"""
Tool to make simple feature functions take features from feature array by seeds.
:param fv: ndarray with lineariezed feature. It's shape is MxN, where M is number of image pixels and N is number
of features
:param seeds: ndarray with seeds. Does not to be linear.
:param unique_cls: number of used seeds clases. Like [1, 2]
:return: fv_selection, seeds_selection - selection from feature vector and selection from seeds
"""
logger.debug("seeds" + str(seeds))
# fvlin = fv.reshape(-1, int(fv.size/seeds.size))
expected_shape = [seeds.size, int(fv.size/seeds.size)]
if fv.shape[0] != expected_shape[0] or fv.shape[1] != expected_shape[1]:
raise AssertionError("Wrong shape of input feature vector array fv")
# sd = seeds.reshape(-1, 1)
selection = np.in1d(seeds, unique_cls)
fv_selection = fv[selection]
seeds_selection = seeds.flatten()[selection]
# sd = sd[]
return fv_selection, seeds_selection | [
"def return_fv_by_seeds(fv, seeds=None, unique_cls=None):\n \"\"\"\n Return features selected by seeds and unique_cls or selection from features and corresponding seed classes.\n\n :param fv: ndarray with lineariezed feature. It's shape is MxN, where M is number of image pixels and N is number\n of feat... | [
0.9158183336257935,
0.7435174584388733,
0.6858772039413452,
0.6444513201713562,
0.6422149538993835,
0.6410118937492371,
0.6389459371566772,
0.6308839917182922,
0.6264603137969971,
0.6260638236999512,
0.6256772875785828,
0.6210070848464966
] |
Return features selected by seeds and unique_cls or selection from features and corresponding seed classes.
:param fv: ndarray with lineariezed feature. It's shape is MxN, where M is number of image pixels and N is number
of features
:param seeds: ndarray with seeds. Does not to be linear.
:param unique_cls: number of used seeds clases. Like [1, 2]
:return: fv, sd - selection from feature vector and selection from seeds or just fv for whole image | def return_fv_by_seeds(fv, seeds=None, unique_cls=None):
"""
Return features selected by seeds and unique_cls or selection from features and corresponding seed classes.
:param fv: ndarray with lineariezed feature. It's shape is MxN, where M is number of image pixels and N is number
of features
:param seeds: ndarray with seeds. Does not to be linear.
:param unique_cls: number of used seeds clases. Like [1, 2]
:return: fv, sd - selection from feature vector and selection from seeds or just fv for whole image
"""
if seeds is not None:
if unique_cls is not None:
return select_from_fv_by_seeds(fv, seeds, unique_cls)
else:
raise AssertionError("Input unique_cls has to be not None if seeds is not None.")
else:
return fv | [
"def select_from_fv_by_seeds(fv, seeds, unique_cls):\n \"\"\"\n Tool to make simple feature functions take features from feature array by seeds.\n :param fv: ndarray with lineariezed feature. It's shape is MxN, where M is number of image pixels and N is number\n of features\n :param seeds: ndarray wi... | [
0.8714072704315186,
0.735267162322998,
0.6699656844139099,
0.6452354788780212,
0.6315693259239197,
0.6298038363456726,
0.6262094974517822,
0.6221494674682617,
0.6198633313179016,
0.61878901720047,
0.6154218316078186,
0.6153923869132996
] |
Expands logical constructions. | def expand(self, expression):
"""Expands logical constructions."""
self.logger.debug("expand : expression %s", str(expression))
if not is_string(expression):
return expression
result = self._pattern.sub(lambda var: str(self._variables[var.group(1)]), expression)
result = result.strip()
self.logger.debug('expand : %s - result : %s', expression, result)
if is_number(result):
if result.isdigit():
self.logger.debug(' expand is integer !!!')
return int(result)
else:
self.logger.debug(' expand is float !!!')
return float(result)
return result | [
"def _expand_logical_shortcuts(cls, schema):\n \"\"\" Expand agglutinated rules in a definition-schema.\n\n :param schema: The schema-definition to expand.\n :return: The expanded schema-definition.\n \"\"\"\n def is_of_rule(x):\n return isinstance(x, _str_type) and \\\... | [
0.7648979425430298,
0.7185328602790833,
0.7133020758628845,
0.712088942527771,
0.7094528675079346,
0.7062499523162842,
0.7050736546516418,
0.7036584615707397,
0.703376293182373,
0.6962770223617554,
0.6927876472473145,
0.691910982131958
] |
Creates gutter clients and memoizes them in a registry for future quick access.
Args:
alias (str or None): Name of the client. Used for caching.
If name is falsy then do not use the cache.
cache (dict): cache to store gutter managers in.
**kwargs: kwargs to be passed the Manger class.
Returns (Manager):
A gutter client. | def get_gutter_client(
alias='default',
cache=CLIENT_CACHE,
**kwargs
):
"""
Creates gutter clients and memoizes them in a registry for future quick access.
Args:
alias (str or None): Name of the client. Used for caching.
If name is falsy then do not use the cache.
cache (dict): cache to store gutter managers in.
**kwargs: kwargs to be passed the Manger class.
Returns (Manager):
A gutter client.
"""
from gutter.client.models import Manager
if not alias:
return Manager(**kwargs)
elif alias not in cache:
cache[alias] = Manager(**kwargs)
return cache[alias] | [
"def create(self, alias=None, cache=None, **kwargs):\n \"\"\"\n Create a new cache. Either alias or cache params are required. You can use\n kwargs to pass extra parameters to configure the cache.\n\n .. deprecated:: 0.11.0\n Only creating a cache passing an alias is supported... | [
0.6883594989776611,
0.6749633550643921,
0.6676217317581177,
0.6557245850563049,
0.6497238278388977,
0.6472004055976868,
0.646845281124115,
0.6415619254112244,
0.6407591700553894,
0.6398371458053589,
0.6362635493278503,
0.6346388459205627
] |
The mod operator is prone to floating point errors, so use decimal.
101.1 % 100
>>> 1.0999999999999943
decimal_context.divmod(Decimal('100.1'), 100)
>>> (Decimal('1'), Decimal('0.1')) | def _modulo(self, decimal_argument):
"""
The mod operator is prone to floating point errors, so use decimal.
101.1 % 100
>>> 1.0999999999999943
decimal_context.divmod(Decimal('100.1'), 100)
>>> (Decimal('1'), Decimal('0.1'))
"""
_times, remainder = self._context.divmod(decimal_argument, 100)
# match the builtin % behavior by adding the N to the result if negative
return remainder if remainder >= 0 else remainder + 100 | [
"def div(a,b):\n \"\"\"``div(a,b)`` is like ``a // b`` if ``b`` devides ``a``, otherwise\n an `ValueError` is raised.\n\n >>> div(10,2)\n 5\n >>> div(10,3)\n Traceback (most recent call last):\n ...\n ValueError: 3 does not divide 10\n \"\"\"\n res, fail = divmod(a,b)\n if fail:\n ... | [
0.7502017021179199,
0.7433937788009644,
0.7366652488708496,
0.7245782613754272,
0.7240141034126282,
0.716590404510498,
0.7109190821647644,
0.7009940147399902,
0.6962355971336365,
0.6936264038085938,
0.683988094329834,
0.681254506111145
] |
Checks to see if this switch is enabled for the provided input.
If ``compounded``, all switch conditions must be ``True`` for the switch
to be enabled. Otherwise, *any* condition needs to be ``True`` for the
switch to be enabled.
The switch state is then checked to see if it is ``GLOBAL`` or
``DISABLED``. If it is not, then the switch is ``SELECTIVE`` and each
condition is checked.
Keyword Arguments:
inpt -- An instance of the ``Input`` class. | def enabled_for(self, inpt):
"""
Checks to see if this switch is enabled for the provided input.
If ``compounded``, all switch conditions must be ``True`` for the switch
to be enabled. Otherwise, *any* condition needs to be ``True`` for the
switch to be enabled.
The switch state is then checked to see if it is ``GLOBAL`` or
``DISABLED``. If it is not, then the switch is ``SELECTIVE`` and each
condition is checked.
Keyword Arguments:
inpt -- An instance of the ``Input`` class.
"""
signals.switch_checked.call(self)
signal_decorated = partial(self.__signal_and_return, inpt)
if self.state is self.states.GLOBAL:
return signal_decorated(True)
elif self.state is self.states.DISABLED:
return signal_decorated(False)
conditions_dict = ConditionsDict.from_conditions_list(self.conditions)
conditions = conditions_dict.get_by_input(inpt)
if conditions:
result = self.__enabled_func(
cond.call(inpt)
for cond
in conditions
if cond.argument(inpt).applies
)
else:
result = None
return signal_decorated(result) | [
"def call(self, inpt):\n \"\"\"\n Returns if the condition applies to the ``inpt``.\n\n If the class ``inpt`` is an instance of is not the same class as the\n condition's own ``argument``, then ``False`` is returned. This also\n applies to the ``NONE`` input.\n\n Otherwise... | [
0.6927810311317444,
0.6696218252182007,
0.666363000869751,
0.6596062779426575,
0.6584011316299438,
0.6560157537460327,
0.6553595066070557,
0.64560866355896,
0.6452962756156921,
0.6417357921600342,
0.6405673623085022,
0.6382938623428345
] |
Returns if the condition applies to the ``inpt``.
If the class ``inpt`` is an instance of is not the same class as the
condition's own ``argument``, then ``False`` is returned. This also
applies to the ``NONE`` input.
Otherwise, ``argument`` is called, with ``inpt`` as the instance and
the value is compared to the ``operator`` and the Value is returned. If
the condition is ``negative``, then then ``not`` the value is returned.
Keyword Arguments:
inpt -- An instance of the ``Input`` class. | def call(self, inpt):
"""
Returns if the condition applies to the ``inpt``.
If the class ``inpt`` is an instance of is not the same class as the
condition's own ``argument``, then ``False`` is returned. This also
applies to the ``NONE`` input.
Otherwise, ``argument`` is called, with ``inpt`` as the instance and
the value is compared to the ``operator`` and the Value is returned. If
the condition is ``negative``, then then ``not`` the value is returned.
Keyword Arguments:
inpt -- An instance of the ``Input`` class.
"""
if inpt is Manager.NONE_INPUT:
return False
# Call (construct) the argument with the input object
argument_instance = self.argument(inpt)
if not argument_instance.applies:
return False
application = self.__apply(argument_instance, inpt)
if self.negative:
application = not application
return application | [
"def enabled_for(self, inpt):\n \"\"\"\n Checks to see if this switch is enabled for the provided input.\n\n If ``compounded``, all switch conditions must be ``True`` for the switch\n to be enabled. Otherwise, *any* condition needs to be ``True`` for the\n switch to be enabled.\n... | [
0.6861664056777954,
0.6830622553825378,
0.6745240092277527,
0.6726325750350952,
0.6664140820503235,
0.6653026938438416,
0.6593443155288696,
0.658407986164093,
0.6563044786453247,
0.6530144810676575,
0.6528307795524597,
0.6505153775215149
] |
List of all switches currently registered. | def switches(self):
"""
List of all switches currently registered.
"""
results = [
switch for name, switch in self.storage.iteritems()
if name.startswith(self.__joined_namespace)
]
return results | [
"def do_list_logical_switch(self, line):\n \"\"\"list_logical_switch <peer>\n \"\"\"\n\n def f(p, args):\n o = p.get()\n for s in o.logical_switches.switch:\n print('%s %s' % (s.id, s.datapath_id))\n\n self._request(line, f)",
"def switches(self):\n... | [
0.7795083522796631,
0.7660427689552307,
0.7531906962394714,
0.7399041652679443,
0.739628255367279,
0.7348507642745972,
0.7344115376472473,
0.724181056022644,
0.7154394388198853,
0.7123521566390991,
0.7080769538879395,
0.7069939970970154
] |
Returns the switch with the provided ``name``.
If ``autocreate`` is set to ``True`` and no switch with that name
exists, a ``DISABLED`` switch will be with that name.
Keyword Arguments:
name -- A name of a switch. | def switch(self, name):
"""
Returns the switch with the provided ``name``.
If ``autocreate`` is set to ``True`` and no switch with that name
exists, a ``DISABLED`` switch will be with that name.
Keyword Arguments:
name -- A name of a switch.
"""
try:
switch = self.storage[self.__namespaced(name)]
except KeyError:
if not self.autocreate:
raise ValueError("No switch named '%s' registered in '%s'" % (name, self.namespace))
switch = self.__create_and_register_disabled_switch(name)
switch.manager = self
return switch | [
"def auto(name):\n '''\n .. versionadded:: 0.17.0\n\n Instruct alternatives to use the highest priority\n path for <name>\n\n name\n is the master name for this link group\n (e.g. pager)\n\n '''\n ret = {'name': name,\n 'result': True,\n 'comment': '',\n ... | [
0.7068734169006348,
0.7061296105384827,
0.7047081589698792,
0.6806841492652893,
0.6628941297531128,
0.6542777419090271,
0.6535179615020752,
0.6494287252426147,
0.649349570274353,
0.6475399732589722,
0.6444427371025085,
0.6438058018684387
] |
Register a switch and persist it to the storage. | def register(self, switch, signal=signals.switch_registered):
'''
Register a switch and persist it to the storage.
'''
if not switch.name:
raise ValueError('Switch name cannot be blank')
switch.manager = self
self.__persist(switch)
signal.call(switch) | [
"def switch(self, name):\n \"\"\"\n Returns the switch with the provided ``name``.\n\n If ``autocreate`` is set to ``True`` and no switch with that name\n exists, a ``DISABLED`` switch will be with that name.\n\n Keyword Arguments:\n name -- A name of a switch.\n \"\... | [
0.7570830583572388,
0.7144901752471924,
0.7065062522888184,
0.689936101436615,
0.6877805590629578,
0.6734769344329834,
0.6715748906135559,
0.6705305576324463,
0.6699982285499573,
0.6660456657409668,
0.6631150841712952,
0.6563107371330261
] |
Central interface to verify interactions.
`verify` uses a fluent interface::
verify(<obj>, times=2).<method_name>(<args>)
`args` can be as concrete as necessary. Often a catch-all is enough,
especially if you're working with strict mocks, bc they throw at call
time on unwanted, unconfigured arguments::
from mockito import ANY, ARGS, KWARGS
when(manager).add_tasks(1, 2, 3)
...
# no need to duplicate the specification; every other argument pattern
# would have raised anyway.
verify(manager).add_tasks(1, 2, 3) # duplicates `when`call
verify(manager).add_tasks(*ARGS)
verify(manager).add_tasks(...) # Py3
verify(manager).add_tasks(Ellipsis) # Py2 | def verify(obj, times=1, atleast=None, atmost=None, between=None,
inorder=False):
"""Central interface to verify interactions.
`verify` uses a fluent interface::
verify(<obj>, times=2).<method_name>(<args>)
`args` can be as concrete as necessary. Often a catch-all is enough,
especially if you're working with strict mocks, bc they throw at call
time on unwanted, unconfigured arguments::
from mockito import ANY, ARGS, KWARGS
when(manager).add_tasks(1, 2, 3)
...
# no need to duplicate the specification; every other argument pattern
# would have raised anyway.
verify(manager).add_tasks(1, 2, 3) # duplicates `when`call
verify(manager).add_tasks(*ARGS)
verify(manager).add_tasks(...) # Py3
verify(manager).add_tasks(Ellipsis) # Py2
"""
if isinstance(obj, str):
obj = get_obj(obj)
verification_fn = _get_wanted_verification(
times=times, atleast=atleast, atmost=atmost, between=between)
if inorder:
verification_fn = verification.InOrder(verification_fn)
# FIXME?: Catch error if obj is neither a Mock nor a known stubbed obj
theMock = _get_mock_or_raise(obj)
class Verify(object):
def __getattr__(self, method_name):
return invocation.VerifiableInvocation(
theMock, method_name, verification_fn)
return Verify() | [
"def verifyZeroInteractions(*objs):\n \"\"\"Verify that no methods have been called on given objs.\n\n Note that strict mocks usually throw early on unexpected, unstubbed\n invocations. Partial mocks ('monkeypatched' objects or modules) do not\n support this functionality at all, bc only for the stubbed... | [
0.7097271084785461,
0.6925081014633179,
0.6835185885429382,
0.6689969897270203,
0.6676076054573059,
0.6642816066741943,
0.6623503565788269,
0.6622745394706726,
0.6559537649154663,
0.6559337377548218,
0.6547770500183105,
0.6542908549308777
] |
Central interface to stub functions on a given `obj`
`obj` should be a module, a class or an instance of a class; it can be
a Dummy you created with :func:`mock`. ``when`` exposes a fluent interface
where you configure a stub in three steps::
when(<obj>).<method_name>(<args>).thenReturn(<value>)
Compared to simple *patching*, stubbing in mockito requires you to specify
conrete `args` for which the stub will answer with a concrete `<value>`.
All invocations that do not match this specific call signature will be
rejected. They usually throw at call time.
Stubbing in mockito's sense thus means not only to get rid of unwanted
side effects, but effectively to turn function calls into constants.
E.g.::
# Given ``dog`` is an instance of a ``Dog``
when(dog).bark('Grrr').thenReturn('Wuff')
when(dog).bark('Miau').thenRaise(TypeError())
# With this configuration set up:
assert dog.bark('Grrr') == 'Wuff'
dog.bark('Miau') # will throw TypeError
dog.bark('Wuff') # will throw unwanted interaction
Stubbing can effectively be used as monkeypatching; usage shown with
the `with` context managing::
with when(os.path).exists('/foo').thenReturn(True):
...
Most of the time verifying your interactions is not necessary, because
your code under tests implicitly verifies the return value by evaluating
it. See :func:`verify` if you need to, see also :func:`expect` to setup
expected call counts up front.
If your function is pure side effect and does not return something, you
can omit the specific answer. The default then is `None`::
when(manager).do_work()
`when` verifies the method name, the expected argument signature, and the
actual, factual arguments your code under test uses against the original
object and its function so its easier to spot changing interfaces.
Sometimes it's tedious to spell out all arguments::
from mockito import ANY, ARGS, KWARGS
when(requests).get('http://example.com/', **KWARGS).thenReturn(...)
when(os.path).exists(ANY)
when(os.path).exists(ANY(str))
.. note:: You must :func:`unstub` after stubbing, or use `with`
statement.
Set ``strict=False`` to bypass the function signature checks.
See related :func:`when2` which has a more pythonic interface. | def when(obj, strict=None):
"""Central interface to stub functions on a given `obj`
`obj` should be a module, a class or an instance of a class; it can be
a Dummy you created with :func:`mock`. ``when`` exposes a fluent interface
where you configure a stub in three steps::
when(<obj>).<method_name>(<args>).thenReturn(<value>)
Compared to simple *patching*, stubbing in mockito requires you to specify
conrete `args` for which the stub will answer with a concrete `<value>`.
All invocations that do not match this specific call signature will be
rejected. They usually throw at call time.
Stubbing in mockito's sense thus means not only to get rid of unwanted
side effects, but effectively to turn function calls into constants.
E.g.::
# Given ``dog`` is an instance of a ``Dog``
when(dog).bark('Grrr').thenReturn('Wuff')
when(dog).bark('Miau').thenRaise(TypeError())
# With this configuration set up:
assert dog.bark('Grrr') == 'Wuff'
dog.bark('Miau') # will throw TypeError
dog.bark('Wuff') # will throw unwanted interaction
Stubbing can effectively be used as monkeypatching; usage shown with
the `with` context managing::
with when(os.path).exists('/foo').thenReturn(True):
...
Most of the time verifying your interactions is not necessary, because
your code under tests implicitly verifies the return value by evaluating
it. See :func:`verify` if you need to, see also :func:`expect` to setup
expected call counts up front.
If your function is pure side effect and does not return something, you
can omit the specific answer. The default then is `None`::
when(manager).do_work()
`when` verifies the method name, the expected argument signature, and the
actual, factual arguments your code under test uses against the original
object and its function so its easier to spot changing interfaces.
Sometimes it's tedious to spell out all arguments::
from mockito import ANY, ARGS, KWARGS
when(requests).get('http://example.com/', **KWARGS).thenReturn(...)
when(os.path).exists(ANY)
when(os.path).exists(ANY(str))
.. note:: You must :func:`unstub` after stubbing, or use `with`
statement.
Set ``strict=False`` to bypass the function signature checks.
See related :func:`when2` which has a more pythonic interface.
"""
if isinstance(obj, str):
obj = get_obj(obj)
if strict is None:
strict = True
theMock = _get_mock(obj, strict=strict)
class When(object):
def __getattr__(self, method_name):
return invocation.StubbedInvocation(
theMock, method_name, strict=strict)
return When() | [
"def when2(fn, *args, **kwargs):\n \"\"\"Stub a function call with the given arguments\n\n Exposes a more pythonic interface than :func:`when`. See :func:`when` for\n more documentation.\n\n Returns `AnswerSelector` interface which exposes `thenReturn`,\n `thenRaise`, and `thenAnswer` as usual. Alway... | [
0.8300884962081909,
0.7341936826705933,
0.7278451919555664,
0.720047116279602,
0.7118588089942932,
0.7065277695655823,
0.7060503959655762,
0.7045286297798157,
0.6935253739356995,
0.6877067685127258,
0.6868920922279358,
0.6828497052192688
] |
Stub a function call with the given arguments
Exposes a more pythonic interface than :func:`when`. See :func:`when` for
more documentation.
Returns `AnswerSelector` interface which exposes `thenReturn`,
`thenRaise`, and `thenAnswer` as usual. Always `strict`.
Usage::
# Given `dog` is an instance of a `Dog`
when2(dog.bark, 'Miau').thenReturn('Wuff')
.. note:: You must :func:`unstub` after stubbing, or use `with`
statement. | def when2(fn, *args, **kwargs):
"""Stub a function call with the given arguments
Exposes a more pythonic interface than :func:`when`. See :func:`when` for
more documentation.
Returns `AnswerSelector` interface which exposes `thenReturn`,
`thenRaise`, and `thenAnswer` as usual. Always `strict`.
Usage::
# Given `dog` is an instance of a `Dog`
when2(dog.bark, 'Miau').thenReturn('Wuff')
.. note:: You must :func:`unstub` after stubbing, or use `with`
statement.
"""
obj, name = get_obj_attr_tuple(fn)
theMock = _get_mock(obj, strict=True)
return invocation.StubbedInvocation(theMock, name)(*args, **kwargs) | [
"def when(obj, strict=None):\n \"\"\"Central interface to stub functions on a given `obj`\n\n `obj` should be a module, a class or an instance of a class; it can be\n a Dummy you created with :func:`mock`. ``when`` exposes a fluent interface\n where you configure a stub in three steps::\n\n when(... | [
0.7158923745155334,
0.7125566005706787,
0.6874305605888367,
0.6713445782661438,
0.6674691438674927,
0.6647620797157288,
0.6502985954284668,
0.6432019472122192,
0.6420909762382507,
0.6394932866096497,
0.6372888088226318,
0.6355871558189392
] |
Patch/Replace a function.
This is really like monkeypatching, but *note* that all interactions
will be recorded and can be verified. That is, using `patch` you stay in
the domain of mockito.
Two ways to call this. Either::
patch(os.path.exists, lambda str: True) # two arguments
# OR
patch(os.path, 'exists', lambda str: True) # three arguments
If called with three arguments, the mode is *not* strict to allow *adding*
methods. If called with two arguments, mode is always `strict`.
.. note:: You must :func:`unstub` after stubbing, or use `with`
statement. | def patch(fn, attr_or_replacement, replacement=None):
"""Patch/Replace a function.
This is really like monkeypatching, but *note* that all interactions
will be recorded and can be verified. That is, using `patch` you stay in
the domain of mockito.
Two ways to call this. Either::
patch(os.path.exists, lambda str: True) # two arguments
# OR
patch(os.path, 'exists', lambda str: True) # three arguments
If called with three arguments, the mode is *not* strict to allow *adding*
methods. If called with two arguments, mode is always `strict`.
.. note:: You must :func:`unstub` after stubbing, or use `with`
statement.
"""
if replacement is None:
replacement = attr_or_replacement
return when2(fn, Ellipsis).thenAnswer(replacement)
else:
obj, name = fn, attr_or_replacement
theMock = _get_mock(obj, strict=True)
return invocation.StubbedInvocation(
theMock, name, strict=False)(Ellipsis).thenAnswer(replacement) | [
"def patch_func(replacement, target_mod, func_name):\n \"\"\"\n Patch func_name in target_mod with replacement\n\n Important - original must be resolved by name to avoid\n patching an already patched function.\n \"\"\"\n original = getattr(target_mod, func_name)\n\n # set the 'unpatched' attrib... | [
0.7399376034736633,
0.7201372385025024,
0.71756911277771,
0.7158401012420654,
0.7078803181648254,
0.6980276107788086,
0.6951728463172913,
0.6931896805763245,
0.6924526691436768,
0.689326822757721,
0.6884449124336243,
0.6861975789070129
] |
Stub a function call, and set up an expected call count.
Usage::
# Given `dog` is an instance of a `Dog`
expect(dog, times=1).bark('Wuff').thenReturn('Miau')
dog.bark('Wuff')
dog.bark('Wuff') # will throw at call time: too many invocations
# maybe if you need to ensure that `dog.bark()` was called at all
verifyNoUnwantedInteractions()
.. note:: You must :func:`unstub` after stubbing, or use `with`
statement.
See :func:`when`, :func:`when2`, :func:`verifyNoUnwantedInteractions` | def expect(obj, strict=None,
times=None, atleast=None, atmost=None, between=None):
"""Stub a function call, and set up an expected call count.
Usage::
# Given `dog` is an instance of a `Dog`
expect(dog, times=1).bark('Wuff').thenReturn('Miau')
dog.bark('Wuff')
dog.bark('Wuff') # will throw at call time: too many invocations
# maybe if you need to ensure that `dog.bark()` was called at all
verifyNoUnwantedInteractions()
.. note:: You must :func:`unstub` after stubbing, or use `with`
statement.
See :func:`when`, :func:`when2`, :func:`verifyNoUnwantedInteractions`
"""
if strict is None:
strict = True
theMock = _get_mock(obj, strict=strict)
verification_fn = _get_wanted_verification(
times=times, atleast=atleast, atmost=atmost, between=between)
class Expect(object):
def __getattr__(self, method_name):
return invocation.StubbedInvocation(
theMock, method_name, verification=verification_fn,
strict=strict)
return Expect() | [
"def when2(fn, *args, **kwargs):\n \"\"\"Stub a function call with the given arguments\n\n Exposes a more pythonic interface than :func:`when`. See :func:`when` for\n more documentation.\n\n Returns `AnswerSelector` interface which exposes `thenReturn`,\n `thenRaise`, and `thenAnswer` as usual. Alway... | [
0.7674841284751892,
0.7304031848907471,
0.7183098196983337,
0.705590009689331,
0.7028374075889587,
0.7000864148139954,
0.6943051815032959,
0.6941514611244202,
0.6917891502380371,
0.6833136677742004,
0.6819828748703003,
0.6706059575080872
] |
Unstubs all stubbed methods and functions
If you don't pass in any argument, *all* registered mocks and
patched modules, classes etc. will be unstubbed.
Note that additionally, the underlying registry will be cleaned.
After an `unstub` you can't :func:`verify` anymore because all
interactions will be forgotten. | def unstub(*objs):
"""Unstubs all stubbed methods and functions
If you don't pass in any argument, *all* registered mocks and
patched modules, classes etc. will be unstubbed.
Note that additionally, the underlying registry will be cleaned.
After an `unstub` you can't :func:`verify` anymore because all
interactions will be forgotten.
"""
if objs:
for obj in objs:
mock_registry.unstub(obj)
else:
mock_registry.unstub_all() | [
"def verifyStubbedInvocationsAreUsed(*objs):\n \"\"\"Ensure stubs are actually used.\n\n This functions just ensures that stubbed methods are actually used. Its\n purpose is to detect interface changes after refactorings. It is meant\n to be invoked usually without arguments just before :func:`unstub`.\... | [
0.7259087562561035,
0.7201184034347534,
0.6834965348243713,
0.6720833778381348,
0.6676120162010193,
0.6579244136810303,
0.6551214456558228,
0.6547223925590515,
0.6515965461730957,
0.6505199670791626,
0.649671733379364,
0.6468276381492615
] |
Verify that no methods have been called on given objs.
Note that strict mocks usually throw early on unexpected, unstubbed
invocations. Partial mocks ('monkeypatched' objects or modules) do not
support this functionality at all, bc only for the stubbed invocations
the actual usage gets recorded. So this function is of limited use,
nowadays. | def verifyZeroInteractions(*objs):
"""Verify that no methods have been called on given objs.
Note that strict mocks usually throw early on unexpected, unstubbed
invocations. Partial mocks ('monkeypatched' objects or modules) do not
support this functionality at all, bc only for the stubbed invocations
the actual usage gets recorded. So this function is of limited use,
nowadays.
"""
for obj in objs:
theMock = _get_mock_or_raise(obj)
if len(theMock.invocations) > 0:
raise VerificationError(
"\nUnwanted interaction: %s" % theMock.invocations[0]) | [
"def verifyStubbedInvocationsAreUsed(*objs):\n \"\"\"Ensure stubs are actually used.\n\n This functions just ensures that stubbed methods are actually used. Its\n purpose is to detect interface changes after refactorings. It is meant\n to be invoked usually without arguments just before :func:`unstub`.\... | [
0.7882779836654663,
0.7657877802848816,
0.7032131552696228,
0.6859399080276489,
0.6765344142913818,
0.6675518155097961,
0.6675054430961609,
0.6674047112464905,
0.6667309999465942,
0.6651041507720947,
0.6645183563232422,
0.6618899703025818
] |
Verifies that expectations set via `expect` are met
E.g.::
expect(os.path, times=1).exists(...).thenReturn(True)
os.path('/foo')
verifyNoUnwantedInteractions(os.path) # ok, called once
If you leave out the argument *all* registered objects will
be checked.
.. note:: **DANGERZONE**: If you did not :func:`unstub` correctly,
it is possible that old registered mocks, from other tests
leak.
See related :func:`expect` | def verifyNoUnwantedInteractions(*objs):
"""Verifies that expectations set via `expect` are met
E.g.::
expect(os.path, times=1).exists(...).thenReturn(True)
os.path('/foo')
verifyNoUnwantedInteractions(os.path) # ok, called once
If you leave out the argument *all* registered objects will
be checked.
.. note:: **DANGERZONE**: If you did not :func:`unstub` correctly,
it is possible that old registered mocks, from other tests
leak.
See related :func:`expect`
"""
if objs:
theMocks = map(_get_mock_or_raise, objs)
else:
theMocks = mock_registry.get_registered_mocks()
for mock in theMocks:
for i in mock.stubbed_invocations:
i.verify() | [
"def unmet_expectations(self):\n '''\n Assert that all expectations on the stub have been met.\n '''\n unmet = []\n for exp in self._expectations:\n if not exp.closed(with_counts=True):\n unmet.append(ExpectationNotSatisfied(exp))\n return unmet",
... | [
0.7114996910095215,
0.7048271894454956,
0.6924911737442017,
0.6901080012321472,
0.6899175047874451,
0.6762266159057617,
0.6691651344299316,
0.6686018109321594,
0.6515703201293945,
0.6468772292137146,
0.6459056735038757,
0.6432895064353943
] |
Ensure stubs are actually used.
This functions just ensures that stubbed methods are actually used. Its
purpose is to detect interface changes after refactorings. It is meant
to be invoked usually without arguments just before :func:`unstub`. | def verifyStubbedInvocationsAreUsed(*objs):
"""Ensure stubs are actually used.
This functions just ensures that stubbed methods are actually used. Its
purpose is to detect interface changes after refactorings. It is meant
to be invoked usually without arguments just before :func:`unstub`.
"""
if objs:
theMocks = map(_get_mock_or_raise, objs)
else:
theMocks = mock_registry.get_registered_mocks()
for mock in theMocks:
for i in mock.stubbed_invocations:
if not i.allow_zero_invocations and i.used < len(i.answers):
raise VerificationError("\nUnused stub: %s" % i) | [
"def unstub(*objs):\n \"\"\"Unstubs all stubbed methods and functions\n\n If you don't pass in any argument, *all* registered mocks and\n patched modules, classes etc. will be unstubbed.\n\n Note that additionally, the underlying registry will be cleaned.\n After an `unstub` you can't :func:`verify` ... | [
0.7473836541175842,
0.7073056697845459,
0.6898674368858337,
0.6717952489852905,
0.6665818095207214,
0.6658259630203247,
0.6647620797157288,
0.6638123989105225,
0.663215160369873,
0.6608758568763733,
0.6538175344467163,
0.6529616713523865
] |
Destructure a given function into its host and its name.
The 'host' of a function is a module, for methods it is usually its
instance or its class. This is safe only for methods, for module wide,
globally declared names it must be considered experimental.
For all reasonable fn: ``getattr(*get_function_host(fn)) == fn``
Returns tuple (host, fn-name)
Otherwise should raise TypeError | def get_function_host(fn):
"""Destructure a given function into its host and its name.
The 'host' of a function is a module, for methods it is usually its
instance or its class. This is safe only for methods, for module wide,
globally declared names it must be considered experimental.
For all reasonable fn: ``getattr(*get_function_host(fn)) == fn``
Returns tuple (host, fn-name)
Otherwise should raise TypeError
"""
obj = None
try:
name = fn.__name__
obj = fn.__self__
except AttributeError:
pass
if obj is None:
# Due to how python imports work, everything that is global on a module
# level must be regarded as not safe here. For now, we go for the extra
# mile, TBC, because just specifying `os.path.exists` would be 'cool'.
#
# TLDR;:
# E.g. `inspect.getmodule(os.path.exists)` returns `genericpath` bc
# that's where `exists` is defined and comes from. But from the point
# of view of the user `exists` always comes and is used from `os.path`
# which points e.g. to `ntpath`. We thus must patch `ntpath`.
# But that's the same for most imports::
#
# # b.py
# from a import foo
#
# Now asking `getmodule(b.foo)` it tells you `a`, but we access and use
# `b.foo` and we therefore must patch `b`.
obj, name = find_invoking_frame_and_try_parse()
# safety check!
assert getattr(obj, name) == fn
return obj, name | [
"def named_function(name):\n \"\"\"Gets a fully named module-global object.\"\"\"\n name_parts = name.split('.')\n module = named_object('.'.join(name_parts[:-1]))\n func = getattr(module, name_parts[-1])\n if hasattr(func, 'original_func'):\n func = func.original_func\n return func",
"de... | [
0.7048628330230713,
0.697286069393158,
0.6927260756492615,
0.686748743057251,
0.677983283996582,
0.6778026223182678,
0.6769918203353882,
0.6759390234947205,
0.6753368377685547,
0.6752420663833618,
0.6730707287788391,
0.6728537082672119
] |
Return obj for given dotted path.
Typical inputs for `path` are 'os' or 'os.path' in which case you get a
module; or 'os.path.exists' in which case you get a function from that
module.
Just returns the given input in case it is not a str.
Note: Relative imports not supported.
Raises ImportError or AttributeError as appropriate. | def get_obj(path):
"""Return obj for given dotted path.
Typical inputs for `path` are 'os' or 'os.path' in which case you get a
module; or 'os.path.exists' in which case you get a function from that
module.
Just returns the given input in case it is not a str.
Note: Relative imports not supported.
Raises ImportError or AttributeError as appropriate.
"""
# Since we usually pass in mocks here; duck typing is not appropriate
# (mocks respond to every attribute).
if not isinstance(path, str):
return path
if path.startswith('.'):
raise TypeError('relative imports are not supported')
parts = path.split('.')
head, tail = parts[0], parts[1:]
obj = importlib.import_module(head)
# Normally a simple reduce, but we go the extra mile
# for good exception messages.
for i, name in enumerate(tail):
try:
obj = getattr(obj, name)
except AttributeError:
# Note the [:i] instead of [:i+1], so we get the path just
# *before* the AttributeError, t.i. the part of it that went ok.
module = '.'.join([head] + tail[:i])
try:
importlib.import_module(module)
except ImportError:
raise AttributeError(
"object '%s' has no attribute '%s'" % (module, name))
else:
raise AttributeError(
"module '%s' has no attribute '%s'" % (module, name))
return obj | [
"def import_string(dotted_path: str) -> Any:\n \"\"\"\n Stolen approximately from django. Import a dotted module path and return the attribute/class designated by the\n last name in the path. Raise ImportError if the import fails.\n \"\"\"\n try:\n module_path, class_name = dotted_path.strip('... | [
0.776495635509491,
0.7743529677391052,
0.7714292407035828,
0.770045816898346,
0.7693743109703064,
0.7667275667190552,
0.7637400031089783,
0.7595250606536865,
0.7577233910560608,
0.7538796663284302,
0.7516044974327087,
0.746537983417511
] |
Split path into (obj, attribute) tuple.
Given `path` is 'os.path.exists' will thus return `(os.path, 'exists')`
If path is not a str, delegates to `get_function_host(path)` | def get_obj_attr_tuple(path):
"""Split path into (obj, attribute) tuple.
Given `path` is 'os.path.exists' will thus return `(os.path, 'exists')`
If path is not a str, delegates to `get_function_host(path)`
"""
if not isinstance(path, str):
return get_function_host(path)
if path.startswith('.'):
raise TypeError('relative imports are not supported')
try:
leading, end = path.rsplit('.', 1)
except ValueError:
raise TypeError('path must have dots')
return get_obj(leading), end | [
"def get_function_host(fn):\n \"\"\"Destructure a given function into its host and its name.\n\n The 'host' of a function is a module, for methods it is usually its\n instance or its class. This is safe only for methods, for module wide,\n globally declared names it must be considered experimental.\n\n ... | [
0.7452395558357239,
0.7234854698181152,
0.6943947672843933,
0.6841155886650085,
0.6819661855697632,
0.6749341487884521,
0.672653079032898,
0.6641891002655029,
0.6619399785995483,
0.6615392565727234,
0.6571950912475586,
0.6566314697265625
] |
Spy an object.
Spying means that all functions will behave as before, so they will
be side effects, but the interactions can be verified afterwards.
Returns Dummy-like, almost empty object as proxy to `object`.
The *returned* object must be injected and used by the code under test;
after that all interactions can be verified as usual.
T.i. the original object **will not be patched**, and has no further
knowledge as before.
E.g.::
import time
time = spy(time)
# inject time
do_work(..., time)
verify(time).time() | def spy(object):
"""Spy an object.
Spying means that all functions will behave as before, so they will
be side effects, but the interactions can be verified afterwards.
Returns Dummy-like, almost empty object as proxy to `object`.
The *returned* object must be injected and used by the code under test;
after that all interactions can be verified as usual.
T.i. the original object **will not be patched**, and has no further
knowledge as before.
E.g.::
import time
time = spy(time)
# inject time
do_work(..., time)
verify(time).time()
"""
if inspect.isclass(object) or inspect.ismodule(object):
class_ = None
else:
class_ = object.__class__
class Spy(_Dummy):
if class_:
__class__ = class_
def __getattr__(self, method_name):
return RememberedProxyInvocation(theMock, method_name)
def __repr__(self):
name = 'Spied'
if class_:
name += class_.__name__
return "<%s id=%s>" % (name, id(self))
obj = Spy()
theMock = Mock(obj, strict=True, spec=object)
mock_registry.register(obj, theMock)
return obj | [
"def spy2(fn): # type: (...) -> None\n \"\"\"Spy usage of given `fn`.\n\n Patches the module, class or object `fn` lives in, so that all\n interactions can be recorded; otherwise executes `fn` as before, so\n that all side effects happen as before.\n\n E.g.::\n\n import time\n spy(time... | [
0.7754902243614197,
0.767021894454956,
0.7479539513587952,
0.7346881031990051,
0.7221837043762207,
0.707771897315979,
0.7065128684043884,
0.7030767798423767,
0.6995481252670288,
0.683078944683075,
0.6758384108543396,
0.669526219367981
] |
Spy usage of given `fn`.
Patches the module, class or object `fn` lives in, so that all
interactions can be recorded; otherwise executes `fn` as before, so
that all side effects happen as before.
E.g.::
import time
spy(time.time)
do_work(...) # nothing injected, uses global patched `time` module
verify(time).time()
Note that builtins often cannot be patched because they're read-only. | def spy2(fn): # type: (...) -> None
"""Spy usage of given `fn`.
Patches the module, class or object `fn` lives in, so that all
interactions can be recorded; otherwise executes `fn` as before, so
that all side effects happen as before.
E.g.::
import time
spy(time.time)
do_work(...) # nothing injected, uses global patched `time` module
verify(time).time()
Note that builtins often cannot be patched because they're read-only.
"""
if isinstance(fn, str):
answer = get_obj(fn)
else:
answer = fn
when2(fn, Ellipsis).thenAnswer(answer) | [
"def spy(object):\n \"\"\"Spy an object.\n\n Spying means that all functions will behave as before, so they will\n be side effects, but the interactions can be verified afterwards.\n\n Returns Dummy-like, almost empty object as proxy to `object`.\n\n The *returned* object must be injected and used by... | [
0.7440390586853027,
0.7235741019248962,
0.7028001546859741,
0.7016107439994812,
0.69905686378479,
0.6942735314369202,
0.6937426328659058,
0.6883745789527893,
0.6880348324775696,
0.6833273768424988,
0.6828630566596985,
0.682716965675354
] |
Create 'empty' objects ('Mocks').
Will create an empty unconfigured object, that you can pass
around. All interactions (method calls) will be recorded and can be
verified using :func:`verify` et.al.
A plain `mock()` will be not `strict`, and thus all methods regardless
of the arguments will return ``None``.
.. note:: Technically all attributes will return an internal interface.
Because of that a simple ``if mock().foo:`` will surprisingly pass.
If you set strict to ``True``: ``mock(strict=True)`` all unexpected
interactions will raise an error instead.
You configure a mock using :func:`when`, :func:`when2` or :func:`expect`.
You can also very conveniently just pass in a dict here::
response = mock({'text': 'ok', 'raise_for_status': lambda: None})
You can also create an empty Mock which is specced against a given
`spec`: ``mock(requests.Response)``. These mock are by default strict,
thus they raise if you want to stub a method, the spec does not implement.
Mockito will also match the function signature.
You can pre-configure a specced mock as well::
response = mock({'json': lambda: {'status': 'Ok'}},
spec=requests.Response)
Mocks are by default callable. Configure the callable behavior using
`when`::
dummy = mock()
when(dummy).__call_(1).thenReturn(2)
All other magic methods must be configured this way or they will raise an
AttributeError.
See :func:`verify` to verify your interactions after usage. | def mock(config_or_spec=None, spec=None, strict=OMITTED):
"""Create 'empty' objects ('Mocks').
Will create an empty unconfigured object, that you can pass
around. All interactions (method calls) will be recorded and can be
verified using :func:`verify` et.al.
A plain `mock()` will be not `strict`, and thus all methods regardless
of the arguments will return ``None``.
.. note:: Technically all attributes will return an internal interface.
Because of that a simple ``if mock().foo:`` will surprisingly pass.
If you set strict to ``True``: ``mock(strict=True)`` all unexpected
interactions will raise an error instead.
You configure a mock using :func:`when`, :func:`when2` or :func:`expect`.
You can also very conveniently just pass in a dict here::
response = mock({'text': 'ok', 'raise_for_status': lambda: None})
You can also create an empty Mock which is specced against a given
`spec`: ``mock(requests.Response)``. These mock are by default strict,
thus they raise if you want to stub a method, the spec does not implement.
Mockito will also match the function signature.
You can pre-configure a specced mock as well::
response = mock({'json': lambda: {'status': 'Ok'}},
spec=requests.Response)
Mocks are by default callable. Configure the callable behavior using
`when`::
dummy = mock()
when(dummy).__call_(1).thenReturn(2)
All other magic methods must be configured this way or they will raise an
AttributeError.
See :func:`verify` to verify your interactions after usage.
"""
if type(config_or_spec) is dict:
config = config_or_spec
else:
config = {}
spec = config_or_spec
if strict is OMITTED:
strict = False if spec is None else True
class Dummy(_Dummy):
if spec:
__class__ = spec # make isinstance work
def __getattr__(self, method_name):
if strict:
raise AttributeError(
"'Dummy' has no attribute %r configured" % method_name)
return functools.partial(
remembered_invocation_builder, theMock, method_name)
def __repr__(self):
name = 'Dummy'
if spec:
name += spec.__name__
return "<%s id=%s>" % (name, id(self))
# That's a tricky one: The object we will return is an *instance* of our
# Dummy class, but the mock we register will point and patch the class.
# T.i. so that magic methods (`__call__` etc.) can be configured.
obj = Dummy()
theMock = Mock(Dummy, strict=strict, spec=spec)
for n, v in config.items():
if inspect.isfunction(v):
invocation.StubbedInvocation(theMock, n)(Ellipsis).thenAnswer(v)
else:
setattr(obj, n, v)
mock_registry.register(obj, theMock)
return obj | [
"def create_autospec(spec, spec_set=False, instance=False, _parent=None,\n _name=None, **kwargs):\n \"\"\"Create a mock object using another object as a spec. Attributes on the\n mock will use the corresponding attribute on the `spec` object as their\n spec.\n\n Functions or methods b... | [
0.729351818561554,
0.713209867477417,
0.6814382076263428,
0.6734673976898193,
0.673088014125824,
0.6675387024879456,
0.6672521233558655,
0.663781464099884,
0.6614181995391846,
0.6592409610748291,
0.6569718718528748,
0.6568246483802795
] |
Function importPuppetClasses
Force the reload of puppet classes
@param smartProxyId: smartProxy Id
@return RETURN: the API result | def importPuppetClasses(self, smartProxyId):
""" Function importPuppetClasses
Force the reload of puppet classes
@param smartProxyId: smartProxy Id
@return RETURN: the API result
"""
return self.api.create('{}/{}/import_puppetclasses'
.format(self.objName, smartProxyId), '{}') | [
"def import_puppetclasses(self, synchronous=True, **kwargs):\n \"\"\"Import puppet classes from puppet Capsule.\n\n :param synchronous: What should happen if the server returns an HTTP\n 202 (accepted) status code? Wait for the task to complete if\n ``True``. Immediately return t... | [
0.7606505155563354,
0.7133715152740479,
0.6375888586044312,
0.6335131525993347,
0.6314616203308105,
0.6294018030166626,
0.6293189525604248,
0.6288355588912964,
0.6247472167015076,
0.6239311695098877,
0.6210521459579468,
0.6183024048805237
] |
Return a list of templates usable by a model. | def get_templates(model):
""" Return a list of templates usable by a model. """
for template_name, template in templates.items():
if issubclass(template.model, model):
yield (template_name, template.layout._meta.verbose_name) | [
"def get_templates(self, action='index'):\n \"\"\"\n Utility function that provides a list of templates to try for a given\n view, when the template isn't overridden by one of the template\n attributes on the class.\n \"\"\"\n app = self.opts.app_label\n model_name =... | [
0.78190678358078,
0.7594239115715027,
0.7541986107826233,
0.7524802684783936,
0.7364693284034729,
0.733937680721283,
0.7246741056442261,
0.7128293514251709,
0.7096962332725525,
0.7047660946846008,
0.7018197774887085,
0.69899982213974
] |
Registers the given layout(s) classes
admin site:
@pages.register(Page)
class Default(PageLayout):
pass | def attach(*layouts, **kwargs):
"""
Registers the given layout(s) classes
admin site:
@pages.register(Page)
class Default(PageLayout):
pass
"""
def _model_admin_wrapper(layout_class):
register(layout_class, layouts[0])
return layout_class
return _model_admin_wrapper | [
"def register():\n \"\"\" Register markdown for flatpages. \"\"\"\n\n admin.site.unregister(FlatPage)\n admin.site.register(FlatPage, LocalFlatPageAdmin)",
"def register_classes_for_admin(db_session, show_pks=True, name='admin'):\n \"\"\"Registers classes for the Admin view that ultimately creates the... | [
0.7355599999427795,
0.7345627546310425,
0.7286189198493958,
0.724654495716095,
0.6845201253890991,
0.6805652976036072,
0.6734296083450317,
0.6697297096252441,
0.6669802069664001,
0.6662369966506958,
0.6658376455307007,
0.6657775044441223
] |
Function enhance
Enhance the object with new item or enhanced items | def enhance(self):
""" Function enhance
Enhance the object with new item or enhanced items
"""
self.update({'os_default_templates':
SubDict(self.api, self.objName,
self.payloadObj, self.key,
SubItemOsDefaultTemplate)})
self.update({'config_templates':
SubDict(self.api, self.objName,
self.payloadObj, self.key,
SubItemConfigTemplate)})
self.update({'ptables':
SubDict(self.api, self.objName,
self.payloadObj, self.key,
SubItemPTable)})
self.update({'media':
SubDict(self.api, self.objName,
self.payloadObj, self.key,
SubItemMedia)})
self.update({'architectures':
SubDict(self.api, self.objName,
self.payloadObj, self.key,
SubItemArchitecture)}) | [
"def enhance(self):\n \"\"\" Function enhance\n Enhance the object with new item or enhanced items\n \"\"\"\n self.update({'parameters':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemPar... | [
0.8729672431945801,
0.8696154356002808,
0.8695272207260132,
0.8679497838020325,
0.8630505800247192,
0.8407313823699951,
0.7447202205657959,
0.7079142928123474,
0.707258939743042,
0.6839107275009155,
0.6810131072998047,
0.6799886226654053
] |
Get required API keys from environment variables. | def get_api_envs():
"""Get required API keys from environment variables."""
client_id = os.environ.get('CLIENT_ID')
user_id = os.environ.get('USER_ID')
if not client_id or not user_id:
raise ValueError('API keys are not found in the environment')
return client_id, user_id | [
"def get_keys(self):\n \"\"\"\n Get the Twitter API keys. Order of precedence is command line,\n environment, config file. Return True if all the keys were found\n and False if not.\n \"\"\"\n env = os.environ.get\n if not self.consumer_key:\n self.consume... | [
0.7456530332565308,
0.7439531087875366,
0.7352682948112488,
0.7221922874450684,
0.7217668294906616,
0.7119734287261963,
0.7052922248840332,
0.7042791843414307,
0.7039889097213745,
0.701678454875946,
0.6990089416503906,
0.6967628002166748
] |
Call given API end_point with API keys.
:param method: HTTP method (e.g. 'get', 'delete').
:param end_point: API endpoint (e.g. 'users/john/sets').
:param params: Dictionary to be sent in the query string (e.g. {'myparam': 'myval'})
:param client_id: Quizlet client ID as string.
:param access_token: Quizlet access token as string.
client_id and access_token are mutually exclusive but mandatory. | def api_call(method, end_point, params=None, client_id=None, access_token=None):
"""Call given API end_point with API keys.
:param method: HTTP method (e.g. 'get', 'delete').
:param end_point: API endpoint (e.g. 'users/john/sets').
:param params: Dictionary to be sent in the query string (e.g. {'myparam': 'myval'})
:param client_id: Quizlet client ID as string.
:param access_token: Quizlet access token as string.
client_id and access_token are mutually exclusive but mandatory.
"""
if bool(client_id) == bool(access_token):
raise ValueError('Either client_id or access_token')
url = 'https://api.quizlet.com/2.0/{}'.format(end_point)
if not params:
params = {}
if client_id:
params['client_id'] = client_id
headers = {'Authorization': 'Bearer {}'.format(access_token)} if access_token else None
response = requests.request(method, url, params=params, headers=headers)
if int(response.status_code / 100) != 2:
error_title = ''
try:
error_title += ', ' + response.json()['error_title']
except ValueError:
pass
except KeyError:
pass
raise ValueError(
'{} returned {}{}'.format(url, response.status_code, error_title)
)
try:
return response.json()
except json.decoder.JSONDecodeError:
pass | [
"async def call_async(self, method, **parameters):\n \"\"\"Makes an async call to the API.\n\n :param method: The method name.\n :param params: Additional parameters to send (for example, search=dict(id='b123') )\n :return: The JSON result (decoded into a dict) from the server.abs\n ... | [
0.7087173461914062,
0.7060321569442749,
0.7050427794456482,
0.6975470185279846,
0.6972582936286926,
0.6885087490081787,
0.6842185854911804,
0.6802607774734497,
0.6796349883079529,
0.6751108765602112,
0.6748965382575989,
0.6742551922798157
] |
:return: json with "keyId" as secret and "url" for posting key | def request_upload_secret(self, secret_id):
"""
:return: json with "keyId" as secret and "url" for posting key
"""
return self._router.post_request_upload_secret(org_id=self.organizationId,
instance_id=self.instanceId,
secret_id=secret_id).json() | [
"def POST(self, **kwargs):\n r'''\n Easily generate keys for a minion and auto-accept the new key\n\n Accepts all the same parameters as the :py:func:`key.gen_accept\n <salt.wheel.key.gen_accept>`.\n\n .. note:: A note about ``curl``\n Avoid using the ``-i`` flag or HTTP... | [
0.7206412553787231,
0.7182224988937378,
0.7178894281387329,
0.7159284353256226,
0.7123870849609375,
0.7001036405563354,
0.699505627155304,
0.6994513273239136,
0.6962321400642395,
0.693744421005249,
0.6907731890678406,
0.6809219717979431
] |
Function checkAndCreate
Check if a subnet exists and create it if not
@param key: The targeted subnet
@param payload: The targeted subnet description
@param domainId: The domainId to be attached wiuth the subnet
@return RETURN: The id of the subnet | def checkAndCreate(self, key, payload, domainId):
""" Function checkAndCreate
Check if a subnet exists and create it if not
@param key: The targeted subnet
@param payload: The targeted subnet description
@param domainId: The domainId to be attached wiuth the subnet
@return RETURN: The id of the subnet
"""
if key not in self:
self[key] = payload
oid = self[key]['id']
if not oid:
return False
#~ Ensure subnet contains the domain
subnetDomainIds = []
for domain in self[key]['domains']:
subnetDomainIds.append(domain['id'])
if domainId not in subnetDomainIds:
subnetDomainIds.append(domainId)
self[key]["domain_ids"] = subnetDomainIds
if len(self[key]["domains"]) is not len(subnetDomainIds):
return False
return oid | [
"def checkAndCreate(self, key, payload):\n \"\"\" Function checkAndCreate\n Check if an object exists and create it if not\n\n @param key: The targeted object\n @param payload: The targeted object description\n @return RETURN: The id of the object\n \"\"\"\n if key n... | [
0.7927488684654236,
0.7818474769592285,
0.7067288756370544,
0.6799399852752686,
0.6731507778167725,
0.6682384014129639,
0.6681324243545532,
0.663805365562439,
0.6622098088264465,
0.6585577726364136,
0.6567074656486511,
0.6497458219528198
] |
Function removeDomain
Delete a domain from a subnet
@param subnetId: The subnet Id
@param domainId: The domainId to be attached wiuth the subnet
@return RETURN: boolean | def removeDomain(self, subnetId, domainId):
""" Function removeDomain
Delete a domain from a subnet
@param subnetId: The subnet Id
@param domainId: The domainId to be attached wiuth the subnet
@return RETURN: boolean
"""
subnetDomainIds = []
for domain in self[subnetId]['domains']:
subnetDomainIds.append(domain['id'])
subnetDomainIds.remove(domainId)
self[subnetId]["domain_ids"] = subnetDomainIds
return len(self[subnetId]["domains"]) is len(subnetDomainIds) | [
"public MockSubnet deleteSubnet(final String subnetId) {\n\n if (subnetId != null && allMockSubnets.containsKey(subnetId)) {\n return allMockSubnets.remove(subnetId);\n }\n\n return null;\n }",
"def delete_subnet(self, subnet):\n '''\n Deletes the specified subnet\... | [
0.7380985617637634,
0.7272915244102478,
0.7221181392669678,
0.7110066413879395,
0.704708456993103,
0.6778912544250488,
0.6716000437736511,
0.6663321852684021,
0.6658353209495544,
0.6627442240715027,
0.6623795032501221,
0.6595145463943481
] |
Mark a callable as exclusive
:param via: factory for a Lock to guard the callable
Guards the callable against being entered again before completion.
Explicitly raises a :py:exc:`RuntimeError` on violation.
:note: If applied to a method, it is exclusive across all instances. | def exclusive(via=threading.Lock):
"""
Mark a callable as exclusive
:param via: factory for a Lock to guard the callable
Guards the callable against being entered again before completion.
Explicitly raises a :py:exc:`RuntimeError` on violation.
:note: If applied to a method, it is exclusive across all instances.
"""
def make_exclusive(fnc):
fnc_guard = via()
@functools.wraps(fnc)
def exclusive_call(*args, **kwargs):
if fnc_guard.acquire(blocking=False):
try:
return fnc(*args, **kwargs)
finally:
fnc_guard.release()
else:
raise RuntimeError('exclusive call to %s violated')
return exclusive_call
return make_exclusive | [
"def exclusively(f):\n \"\"\"\n Decorate a function to make it thread-safe by serializing invocations\n using a per-instance lock.\n \"\"\"\n @wraps(f)\n def exclusively_f(self, *a, **kw):\n with self._lock:\n return f(self, *a, **kw)\n return exclusively_f",
"def cross_vali... | [
0.6868792772293091,
0.6690714359283447,
0.668178141117096,
0.6552027463912964,
0.6545938849449158,
0.6495324969291687,
0.6395823955535889,
0.6382086873054504,
0.6369533538818359,
0.635248064994812,
0.6320596933364868,
0.6301057934761047
] |
r"""
Mark a class as implementing a Service
Each Service class must have a ``run`` method, which does not take any arguments.
This method is :py:meth:`~.ServiceRunner.adopt`\ ed after the daemon starts, unless
* the Service has been garbage collected, or
* the ServiceUnit has been :py:meth:`~.ServiceUnit.cancel`\ ed.
For each service instance, its :py:class:`~.ServiceUnit` is available at ``service_instance.__service_unit__``. | def service(flavour):
r"""
Mark a class as implementing a Service
Each Service class must have a ``run`` method, which does not take any arguments.
This method is :py:meth:`~.ServiceRunner.adopt`\ ed after the daemon starts, unless
* the Service has been garbage collected, or
* the ServiceUnit has been :py:meth:`~.ServiceUnit.cancel`\ ed.
For each service instance, its :py:class:`~.ServiceUnit` is available at ``service_instance.__service_unit__``.
"""
def service_unit_decorator(raw_cls):
__new__ = raw_cls.__new__
def __new_service__(cls, *args, **kwargs):
if __new__ is object.__new__:
self = __new__(cls)
else:
self = __new__(cls, *args, **kwargs)
service_unit = ServiceUnit(self, flavour)
self.__service_unit__ = service_unit
return self
raw_cls.__new__ = __new_service__
if raw_cls.run.__doc__ is None:
raw_cls.run.__doc__ = "Service entry point"
return raw_cls
return service_unit_decorator | [
"def run_services(config, *services, **kwargs):\n \"\"\" Serves a number of services for a contextual block.\n The caller can specify a number of service classes then serve them either\n stopping (default) or killing them on exiting the contextual block.\n\n\n Example::\n\n with run_services(conf... | [
0.8049406409263611,
0.7954756617546082,
0.7793565392494202,
0.7734854221343994,
0.7600001692771912,
0.7543185949325562,
0.7501642107963562,
0.7460813522338867,
0.7432956695556641,
0.7425548434257507,
0.7424837350845337,
0.7364528775215149
] |
Synchronously run ``payload`` and provide its output
If ``*args*`` and/or ``**kwargs`` are provided, pass them to ``payload`` upon execution. | def execute(self, payload, *args, flavour: ModuleType, **kwargs):
"""
Synchronously run ``payload`` and provide its output
If ``*args*`` and/or ``**kwargs`` are provided, pass them to ``payload`` upon execution.
"""
if args or kwargs:
payload = functools.partial(payload, *args, **kwargs)
return self._meta_runner.run_payload(payload, flavour=flavour) | [
"def run_payload(self, payload, *, flavour: ModuleType):\n \"\"\"Execute one payload after its runner is started and return its output\"\"\"\n return self.runners[flavour].run_payload(payload)",
"def adopt(self, payload, *args, flavour: ModuleType, **kwargs):\n \"\"\"\n Concurrently ru... | [
0.8208741545677185,
0.7724427580833435,
0.742557168006897,
0.7417251467704773,
0.7405674457550049,
0.7298162579536438,
0.715425968170166,
0.709276020526886,
0.7042000889778137,
0.6956870555877686,
0.6918435096740723,
0.6878609657287598
] |
Concurrently run ``payload`` in the background
If ``*args*`` and/or ``**kwargs`` are provided, pass them to ``payload`` upon execution. | def adopt(self, payload, *args, flavour: ModuleType, **kwargs):
"""
Concurrently run ``payload`` in the background
If ``*args*`` and/or ``**kwargs`` are provided, pass them to ``payload`` upon execution.
"""
if args or kwargs:
payload = functools.partial(payload, *args, **kwargs)
self._meta_runner.register_payload(payload, flavour=flavour) | [
"def execute(self, payload, *args, flavour: ModuleType, **kwargs):\n \"\"\"\n Synchronously run ``payload`` and provide its output\n\n If ``*args*`` and/or ``**kwargs`` are provided, pass them to ``payload`` upon execution.\n \"\"\"\n if args or kwargs:\n payload = func... | [
0.8430559635162354,
0.808273434638977,
0.754439651966095,
0.7477638721466064,
0.7286441326141357,
0.7200483679771423,
0.7090065479278564,
0.7083503603935242,
0.6952285766601562,
0.695154070854187,
0.6930971145629883,
0.6929183006286621
] |
Start accepting synchronous, asynchronous and service payloads
Since services are globally defined, only one :py:class:`ServiceRunner`
may :py:meth:`accept` payloads at any time. | def accept(self):
"""
Start accepting synchronous, asynchronous and service payloads
Since services are globally defined, only one :py:class:`ServiceRunner`
may :py:meth:`accept` payloads at any time.
"""
if self._meta_runner:
raise RuntimeError('payloads scheduled for %s before being started' % self)
self._must_shutdown = False
self._logger.info('%s starting', self.__class__.__name__)
# force collecting objects so that defunct, migrated and overwritten services are destroyed now
gc.collect()
self._adopt_services()
self.adopt(self._accept_services, flavour=trio)
self._meta_runner.run() | [
"def adopt(self, payload, *args, flavour: ModuleType, **kwargs):\n \"\"\"\n Concurrently run ``payload`` in the background\n\n If ``*args*`` and/or ``**kwargs`` are provided, pass them to ``payload`` upon execution.\n \"\"\"\n if args or kwargs:\n payload = functools.pa... | [
0.7352553009986877,
0.7254830002784729,
0.7184668183326721,
0.7160866856575012,
0.7129335403442383,
0.7126063704490662,
0.7119871973991394,
0.7111474871635437,
0.6988613605499268,
0.6975022554397583,
0.6972094178199768,
0.6907680630683899
] |
Shutdown the accept loop and stop running payloads | def shutdown(self):
"""Shutdown the accept loop and stop running payloads"""
self._must_shutdown = True
self._is_shutdown.wait()
self._meta_runner.stop() | [
"def stop(self):\n \"\"\"Stop execution of all current and future payloads\"\"\"\n if not self.running.wait(0.2):\n return\n self._logger.debug('runner disabled: %s', self)\n with self._lock:\n self.running.clear()\n self._stopped.wait()",
"def shutdown(sel... | [
0.7566830515861511,
0.7525043487548828,
0.7137492895126343,
0.7121773362159729,
0.7077625393867493,
0.7046758532524109,
0.7045189738273621,
0.7034379243850708,
0.6988451480865479,
0.698276937007904,
0.6970272064208984,
0.6965173482894897
] |
View/edit/close milestones on github | def milestones(ctx, list, close):
"""View/edit/close milestones on github
"""
repos = get_repos(ctx.parent.agile.get('labels'))
if list:
_list_milestones(repos)
elif close:
click.echo('Closing milestones "%s"' % close)
_close_milestone(repos, close)
else:
click.echo(ctx.get_help()) | [
"def get_milestones(repo_name=None,\n profile='github',\n state='open',\n sort='due_on',\n direction='asc',\n output='min',\n per_page=None):\n '''\n Return information about milestones for a given repo... | [
0.7411880493164062,
0.7096606492996216,
0.6985909342765808,
0.6985446214675903,
0.6976544260978699,
0.6975024342536926,
0.6945411562919617,
0.6902499794960022,
0.6846461892127991,
0.6774165034294128,
0.675011396408081,
0.6747035980224609
] |
Starts a console; modified from code.interact | def start_console(local_vars={}):
'''Starts a console; modified from code.interact'''
transforms.CONSOLE_ACTIVE = True
transforms.remove_not_allowed_in_console()
sys.ps1 = prompt
console = ExperimentalInteractiveConsole(locals=local_vars)
console.interact(banner=banner) | [
"def d(self, depth=1):\n \"\"\"Launches an interactive console at the point where it's called.\"\"\"\n info = self.inspect.getframeinfo(self.sys._getframe(1))\n s = self.Stanza(self.indent)\n s.add([info.function + ': '])\n s.add([self.MAGENTA, 'Interactive console opened', self.N... | [
0.7744324207305908,
0.7488301992416382,
0.7446141839027405,
0.740533709526062,
0.7396584153175354,
0.7339498996734619,
0.7265417575836182,
0.7221200466156006,
0.719450831413269,
0.7167628407478333,
0.7133338451385498,
0.7087196111679077
] |
Transform and push a line to the interpreter.
The line should not have a trailing newline; it may have
internal newlines. The line is appended to a buffer and the
interpreter's runsource() method is called with the
concatenated contents of the buffer as source. If this
indicates that the command was executed or invalid, the buffer
is reset; otherwise, the command is incomplete, and the buffer
is left as it was after the line was appended. The return
value is 1 if more input is required, 0 if the line was dealt
with in some way (this is the same as runsource()). | def push(self, line):
"""Transform and push a line to the interpreter.
The line should not have a trailing newline; it may have
internal newlines. The line is appended to a buffer and the
interpreter's runsource() method is called with the
concatenated contents of the buffer as source. If this
indicates that the command was executed or invalid, the buffer
is reset; otherwise, the command is incomplete, and the buffer
is left as it was after the line was appended. The return
value is 1 if more input is required, 0 if the line was dealt
with in some way (this is the same as runsource()).
"""
if transforms.FROM_EXPERIMENTAL.match(line):
transforms.add_transformers(line)
self.buffer.append("\n")
else:
self.buffer.append(line)
add_pass = False
if line.rstrip(' ').endswith(":"):
add_pass = True
source = "\n".join(self.buffer)
if add_pass:
source += "pass"
source = transforms.transform(source)
if add_pass:
source = source.rstrip(' ')
if source.endswith("pass"):
source = source[:-4]
# some transformations may strip an empty line meant to end a block
if not self.buffer[-1]:
source += "\n"
try:
more = self.runsource(source, self.filename)
except SystemExit:
os._exit(1)
if not more:
self.resetbuffer()
return more | [
"def push(self, line):\n \"\"\"Push a line to the interpreter.\n\n The line should not have a trailing newline; it may have\n internal newlines. The line is appended to a buffer and the\n interpreter's runsource() method is called with the\n concatenated contents of the buffer as... | [
0.9045475125312805,
0.7175180912017822,
0.701181173324585,
0.6785591244697571,
0.6757619380950928,
0.6738131046295166,
0.6724637150764465,
0.6719925999641418,
0.6677915453910828,
0.6668353080749512,
0.6662765741348267,
0.665422260761261
] |
Write dict object into file
:param obj: the object to be dumped into toml
:param f: the file object
:param preserve: optional flag to preserve the inline table in result | def dump(obj, f, preserve=False):
"""Write dict object into file
:param obj: the object to be dumped into toml
:param f: the file object
:param preserve: optional flag to preserve the inline table in result
"""
if not f.write:
raise TypeError('You can only dump an object into a file object')
encoder = Encoder(f, preserve=preserve)
return encoder.write_dict(obj) | [
"def dumps(obj, preserve=False):\n \"\"\"Stringifies a dict as toml\n\n :param obj: the object to be dumped into toml\n :param preserve: optional flag to preserve the inline table in result\n \"\"\"\n f = StringIO()\n dump(obj, f, preserve)\n return f.getvalue()",
"def dump(o, f):\n \"\"\"... | [
0.8067118525505066,
0.7437865734100342,
0.7063112854957581,
0.7061246037483215,
0.7056741118431091,
0.7037335634231567,
0.7030279636383057,
0.6980135440826416,
0.6967124938964844,
0.6956966519355774,
0.6951221227645874,
0.6929183602333069
] |
Stringifies a dict as toml
:param obj: the object to be dumped into toml
:param preserve: optional flag to preserve the inline table in result | def dumps(obj, preserve=False):
"""Stringifies a dict as toml
:param obj: the object to be dumped into toml
:param preserve: optional flag to preserve the inline table in result
"""
f = StringIO()
dump(obj, f, preserve)
return f.getvalue() | [
"def dump(obj, f, preserve=False):\n \"\"\"Write dict object into file\n\n :param obj: the object to be dumped into toml\n :param f: the file object\n :param preserve: optional flag to preserve the inline table in result\n \"\"\"\n if not f.write:\n raise TypeError('You can only dump an obj... | [
0.8154090046882629,
0.7720668911933899,
0.7706790566444397,
0.7599770426750183,
0.6874340772628784,
0.6836484670639038,
0.6806732416152954,
0.6798952221870422,
0.6748935580253601,
0.6687008738517761,
0.6676678657531738,
0.6665995121002197
] |
Loads licenses from the given directory. | def license_loader(lic_dir=LIC_DIR):
"""Loads licenses from the given directory."""
lics = []
for ln in os.listdir(lic_dir):
lp = os.path.join(lic_dir, ln)
with open(lp) as lf:
txt = lf.read()
lic = License(txt)
lics.append(lic)
return lics | [
"private void loadCertificatesFromDirectory(File directory) {\n\n directorySanityChecks(directory);\n\n synchronized (listenerLock) {\n listener.notifyCertficateLookupEvent(directory.getAbsolutePath());\n }\n\n File[] certFiles = directory.listFiles(new FilenameFilter() {\n\n public boolean ac... | [
0.7496620416641235,
0.7284637093544006,
0.7106066942214966,
0.7100866436958313,
0.7081019878387451,
0.700099766254425,
0.6998074054718018,
0.6977293491363525,
0.6956530213356018,
0.695216715335846,
0.693123996257782,
0.6906286478042603
] |
Return pseudo-choice vectors. | def get_vector(self, max_choice=3):
"""Return pseudo-choice vectors."""
vec = {}
for dim in ['forbidden', 'required', 'permitted']:
if self.meta[dim] is None:
continue
dim_vec = map(lambda x: (x, max_choice), self.meta[dim])
vec[dim] = dict(dim_vec)
return vec | [
"def getUtilities(self, decision, orderVector):\n \"\"\"\n Returns a floats that contains the utilities of every candidate in the decision.\n\n :ivar list<int> decision: Contains a list of integer representations of candidates in the \n current decision.\n :ivar list<int> orde... | [
0.679014265537262,
0.6754641532897949,
0.6727849841117859,
0.6662163734436035,
0.6640924215316772,
0.6624693870544434,
0.6603453755378723,
0.6599308252334595,
0.6597691774368286,
0.6582732796669006,
0.6564963459968567,
0.6564908623695374
] |
CLI for tonomi.com using contrib-python-qubell-client
To enable completion:
eval "$(_NOMI_COMPLETE=source nomi)" | def entity(ctx, debug, uncolorize, **kwargs):
"""
CLI for tonomi.com using contrib-python-qubell-client
To enable completion:
eval "$(_NOMI_COMPLETE=source nomi)"
"""
global PROVIDER_CONFIG
if debug:
log.basicConfig(level=log.DEBUG)
log.getLogger("requests.packages.urllib3.connectionpool").setLevel(log.DEBUG)
for (k, v) in kwargs.iteritems():
if v:
QUBELL[k] = v
PROVIDER_CONFIG = {
'configuration.provider': PROVIDER['provider_type'],
'configuration.legacy-regions': PROVIDER['provider_region'],
'configuration.endpoint-url': '',
'configuration.legacy-security-group': '',
'configuration.identity': PROVIDER['provider_identity'],
'configuration.credential': PROVIDER['provider_credential']
}
class UserContext(object):
def __init__(self):
self.platform = None
self.unauthenticated_platform = None
self.colorize = not (uncolorize)
def get_platform(self):
if not self.platform:
assert QUBELL["tenant"], "No platform URL provided. Set QUBELL_TENANT or use --tenant option."
if not QUBELL["token"]:
assert QUBELL["user"], "No username. Set QUBELL_USER or use --user option."
assert QUBELL["password"], "No password provided. Set QUBELL_PASSWORD or use --password option."
self.platform = QubellPlatform.connect(
tenant=QUBELL["tenant"],
user=QUBELL["user"],
password=QUBELL["password"],
token=QUBELL["token"])
return self.platform
def get_unauthenticated_platform(self):
if not self.unauthenticated_platform:
assert QUBELL["tenant"], "No platform URL provided. Set QUBELL_TENANT or use --tenant option."
self.unauthenticated_platform = QubellPlatform.connect(tenant=QUBELL["tenant"])
return self.unauthenticated_platform
ctx = click.get_current_context()
ctx.obj = UserContext() | [
"def consult(string_in):\n \"\"\"\n provide file:consult/1 functionality with python types\n \"\"\"\n # pylint: disable=eval-used\n # pylint: disable=too-many-branches\n # pylint: disable=too-many-statements\n\n # manually parse textual erlang data to avoid external dependencies\n list_out =... | [
0.6493841409683228,
0.6391778588294983,
0.6360665559768677,
0.6351642608642578,
0.63483065366745,
0.633405864238739,
0.6264551877975464,
0.6261827349662781,
0.6250460743904114,
0.6250454187393188,
0.6231980919837952,
0.6224732398986816
] |
Upload application from file.
By default, file name will be used as application name, with "-vXX.YYY" suffix stripped.
Application is looked up by one of these classifiers, in order of priority:
app-id, app-name, filename.
If app-id is provided, looks up existing application and updates its manifest.
If app-id is NOT specified, looks up by name, or creates new application. | def import_app(files, category, overwrite, id, name):
""" Upload application from file.
By default, file name will be used as application name, with "-vXX.YYY" suffix stripped.
Application is looked up by one of these classifiers, in order of priority:
app-id, app-name, filename.
If app-id is provided, looks up existing application and updates its manifest.
If app-id is NOT specified, looks up by name, or creates new application.
"""
platform = _get_platform()
org = platform.get_organization(QUBELL["organization"])
if category:
category = org.categories[category]
regex = re.compile(r"^(.*?)(-v(\d+)|)\.[^.]+$")
if (id or name) and len(files) > 1:
raise Exception("--id and --name are supported only for single-file mode")
for filename in files:
click.echo("Importing " + filename, nl=False)
if not name:
match = regex.match(basename(filename))
if not match:
click.echo(_color("RED", "FAIL") + " unknown filename format")
break
name = regex.match(basename(filename)).group(1)
click.echo(" => ", nl=False)
app = None
try:
app = org.get_application(id=id, name=name)
if app and not overwrite:
click.echo("%s %s already exists %s" % (
app.id, _color("BLUE", app and app.name or name), _color("RED", "FAIL")))
break
except NotFoundError:
if id:
click.echo("%s %s not found %s" % (
id or "", _color("BLUE", app and app.name or name), _color("RED", "FAIL")))
break
click.echo(_color("BLUE", app and app.name or name) + " ", nl=False)
try:
with file(filename, "r") as f:
if app:
app.update(name=app.name,
category=category and category.id or app.category,
manifest=Manifest(content=f.read()))
else:
app = org.application(id=id, name=name, manifest=Manifest(content=f.read()))
if category:
app.update(category=category.id)
click.echo(app.id + _color("GREEN", " OK"))
except IOError as e:
click.echo(_color("RED", " FAIL") + " " + e.message)
break | [
"def app_upload(path, name, manifest, package, docker_address, registry, manifest_only, **kwargs):\n \"\"\"\n Upload application with its environment (directory) into the storage.\n\n Application directory or its subdirectories must contain valid manifest file\n named `manifest.json` or `manifest` other... | [
0.7606568336486816,
0.722040593624115,
0.7040286064147949,
0.6995224952697754,
0.6908955574035645,
0.6881159543991089,
0.6877210140228271,
0.6844629049301147,
0.684209942817688,
0.68125319480896,
0.680411696434021,
0.6802532076835632
] |
Exports current account configuration in
shell-friendly form. Takes into account
explicit top-level flags like --organization. | def show_account():
"""
Exports current account configuration in
shell-friendly form. Takes into account
explicit top-level flags like --organization.
"""
click.echo("# tonomi api")
for (key, env) in REVERSE_MAPPING.items():
value = QUBELL.get(key, None)
if value:
click.echo("export %s='%s'" % (env, value))
if any(map(lambda x: PROVIDER.get(x), REVERSE_PROVIDER_MAPPING.keys())):
click.echo("# cloud account")
for (key, env) in REVERSE_PROVIDER_MAPPING.items():
value = PROVIDER.get(key, None)
if value:
click.echo("export %s='%s'" % (env, value)) | [
"def main(role, ou, assume, profile, output, regions, active):\n \"\"\"Generate a c7n-org accounts config file using AWS Organizations\n\n With c7n-org you can then run policies or arbitrary scripts across\n accounts.\n \"\"\"\n\n session = get_session(assume, 'c7n-org', profile)\n client = sessio... | [
0.7122164964675903,
0.6900547742843628,
0.6882532835006714,
0.6875327825546265,
0.6827141642570496,
0.6803291440010071,
0.6782150864601135,
0.677182674407959,
0.6758404970169067,
0.6755895018577576,
0.6741968989372253,
0.6734408140182495
] |
Generates new session token from the given refresh token.
:param refresh_token: refresh token to generate from
:param verbose: whether expiration time should be added to output | def generate_session_token(refresh_token, verbose):
"""
Generates new session token from the given refresh token.
:param refresh_token: refresh token to generate from
:param verbose: whether expiration time should be added to output
"""
platform = _get_platform(authenticated=False)
session_token, expires_in = platform.generate_session_token(refresh_token)
if verbose:
click.echo("%s\n\n%s" % (session_token, _color('YELLOW', "Expires in %d seconds" % expires_in)))
else:
click.echo(session_token) | [
"def use_refresh_token(self, refresh_token, scope=None):\n # type (str, Optional[List[str]]) -> Tuple[se_leg_op.access_token.AccessToken, Optional[str]]\n \"\"\"\n Creates a new access token, and refresh token, based on the supplied refresh token.\n :return: new access token and new refr... | [
0.6952347159385681,
0.6901059150695801,
0.6878064274787903,
0.6642931699752808,
0.6559945940971375,
0.6553444862365723,
0.6552048921585083,
0.6551014184951782,
0.6549071073532104,
0.6531271934509277,
0.6507424116134644,
0.6490411162376404
] |
Example interface commands | def runcommand(cosmology='WMAP5'):
""" Example interface commands """
# Return the WMAP5 cosmology concentration predicted for
# z=0 range of masses
Mi = [1e8, 1e9, 1e10]
zi = 0
print("Concentrations for haloes of mass %s at z=%s" % (Mi, zi))
output = commah.run(cosmology=cosmology, zi=zi, Mi=Mi)
print(output['c'].flatten())
# Return the WMAP5 cosmology concentration predicted for
# z=0 range of masses AND cosmological parameters
Mi = [1e8, 1e9, 1e10]
zi = 0
print("Concentrations for haloes of mass %s at z=%s" % (Mi, zi))
output, cosmo = commah.run(cosmology=cosmology, zi=zi, Mi=Mi,
retcosmo=True)
print(output['c'].flatten())
print(cosmo)
# Return the WMAP5 cosmology concentration predicted for MW
# mass (2e12 Msol) across redshift
Mi = 2e12
z = [0, 0.5, 1, 1.5, 2, 2.5]
output = commah.run(cosmology=cosmology, zi=0, Mi=Mi, z=z)
for zval in z:
print("M(z=0)=%s has c(z=%s)=%s"
% (Mi, zval, output[output['z'] == zval]['c'].flatten()))
# Return the WMAP5 cosmology concentration predicted for MW
# mass (2e12 Msol) across redshift
Mi = 2e12
zi = [0, 0.5, 1, 1.5, 2, 2.5]
output = commah.run(cosmology=cosmology, zi=zi, Mi=Mi)
for zval in zi:
print("M(z=%s)=%s has concentration %s"
% (zval, Mi, output[(output['zi'] == zval) &
(output['z'] == zval)]['c'].flatten()))
# Return the WMAP5 cosmology concentration and
# rarity of high-z cluster
Mi = 2e14
zi = 6
output = commah.run(cosmology=cosmology, zi=zi, Mi=Mi)
print("Concentrations for haloes of mass %s at z=%s" % (Mi, zi))
print(output['c'].flatten())
print("Mass variance sigma of haloes of mass %s at z=%s" % (Mi, zi))
print(output['sig'].flatten())
print("Fluctuation for haloes of mass %s at z=%s" % (Mi, zi))
print(output['nu'].flatten())
# Return the WMAP5 cosmology accretion rate prediction
# for haloes at range of redshift and mass
Mi = [1e8, 1e9, 1e10]
zi = [0]
z = [0, 0.5, 1, 1.5, 2, 2.5]
output = commah.run(cosmology=cosmology, zi=zi, Mi=Mi, z=z)
for Mval in Mi:
print("dM/dt for halo of mass %s at z=%s across redshift %s is: "
% (Mval, zi, z))
print(output[output['Mi'] == Mval]['dMdt'].flatten())
# Return the WMAP5 cosmology Halo Mass History for haloes with M(z=0) = 1e8
M = [1e8]
z = [0, 0.5, 1, 1.5, 2, 2.5]
print("Halo Mass History for z=0 mass of %s across z=%s" % (M, z))
output = commah.run(cosmology=cosmology, zi=0, Mi=M, z=z)
print(output['Mz'].flatten())
# Return the WMAP5 cosmology formation redshifts for haloes at
# range of redshift and mass
M = [1e8, 1e9, 1e10]
z = [0]
print("Formation Redshifts for haloes of mass %s at z=%s" % (M, z))
output = commah.run(cosmology=cosmology, zi=0, Mi=M, z=z)
for Mval in M:
print(output[output['Mi'] == Mval]['zf'].flatten())
return("Done") | [
"def example_repl(self, text, example, start_index, continue_flag):\n \"\"\" REPL for interactive tutorials \"\"\"\n if start_index:\n start_index = start_index + 1\n cmd = ' '.join(text.split()[:start_index])\n example_cli = CommandLineInterface(\n appl... | [
0.7600148320198059,
0.7454254627227783,
0.7358937859535217,
0.7320149540901184,
0.7233145833015442,
0.7186895608901978,
0.7185418605804443,
0.7126096487045288,
0.7122504711151123,
0.7091414928436279,
0.7089841365814209,
0.7071307897567749
] |
Example ways to interrogate the dataset and plot the commah output | def plotcommand(cosmology='WMAP5', plotname=None):
""" Example ways to interrogate the dataset and plot the commah output """
# Plot the c-M relation as a functon of redshift
xarray = 10**(np.arange(1, 15, 0.2))
yval = 'c'
# Specify the redshift range
zarray = np.arange(0, 5, 0.5)
xtitle = r"Halo Mass (M$_{sol}$)"
ytitle = r"Concentration"
linelabel = "z="
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel(xtitle)
ax.set_ylabel(ytitle)
plt.ylim([2, 30])
colors = cm.rainbow(np.linspace(0, 1, len(zarray)))
for zind, zval in enumerate(zarray):
output = commah.run(cosmology=cosmology, zi=zval, Mi=xarray)
# Access the column yval from the data file
yarray = output[yval].flatten()
# Plot each line in turn with different colour
ax.plot(xarray, yarray, label=linelabel+str(zval), color=colors[zind])
# Overplot the D08 predictions in black
ax.plot(xarray, commah.commah.cduffy(zval, xarray), color="black")
ax.set_xscale('log')
ax.set_yscale('log')
leg = ax.legend(loc=1)
# Make box totally transparent
leg.get_frame().set_alpha(0)
leg.get_frame().set_edgecolor('white')
for label in leg.get_texts():
label.set_fontsize('small') # the font size
for label in leg.get_lines():
label.set_linewidth(4) # the legend line width
if plotname:
fig.tight_layout(pad=0.2)
print("Plotting to '%s_CM_relation.png'" % (plotname))
fig.savefig(plotname+"_CM_relation.png", dpi=fig.dpi*5)
else:
plt.show()
# Plot the c-z relation as a function of mass (so always Mz=M0)
xarray = 10**(np.arange(0, 1, 0.05)) - 1
yval = 'c'
# Specify the mass range
zarray = 10**np.arange(6, 14, 2)
xtitle = r"Redshift"
ytitle = r"NFW Concentration"
linelabel = r"log$_{10}$ M$_{z}$(M$_{sol}$)="
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel(xtitle)
ax.set_ylabel(ytitle)
colors = cm.rainbow(np.linspace(0, 1, len(zarray)))
for zind, zval in enumerate(zarray):
output = commah.run(cosmology=cosmology, zi=xarray, Mi=zval)
# Access the column yval from the data file
yarray = output[yval].flatten()
# Plot each line in turn with different colours
ax.plot(xarray, yarray,
label=linelabel+"{0:.1f}".format(np.log10(zval)),
color=colors[zind],)
leg = ax.legend(loc=1)
# Make box totally transparent
leg.get_frame().set_alpha(0)
leg.get_frame().set_edgecolor('white')
for label in leg.get_texts():
label.set_fontsize('small') # the font size
for label in leg.get_lines():
label.set_linewidth(4) # the legend line width
if plotname:
fig.tight_layout(pad=0.2)
print("Plotting to '%s_Cz_relation.png'" % (plotname))
fig.savefig(plotname+"_Cz_relation.png", dpi=fig.dpi*5)
else:
plt.show()
# Plot the zf-z relation for different masses (so always Mz=M0)
xarray = 10**(np.arange(0, 1, 0.05)) - 1
yval = 'zf'
# Specify the mass range
zarray = 10**np.arange(6, 14, 2)
xtitle = r"Redshift"
ytitle = r"Formation Redshift"
linelabel = r"log$_{10}$ M$_{z}$(M$_{sol}$)="
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel(xtitle)
ax.set_ylabel(ytitle)
colors = cm.rainbow(np.linspace(0, 1, len(zarray)))
for zind, zval in enumerate(zarray):
output = commah.run(cosmology=cosmology, zi=xarray, Mi=zval)
yarray = output[yval].flatten()
# Plot each line in turn with different colour
ax.plot(xarray, yarray,
label=linelabel+"{0:.1f}".format(np.log10(zval)),
color=colors[zind],)
leg = ax.legend(loc=2)
# Make box totally transparent
leg.get_frame().set_alpha(0)
leg.get_frame().set_edgecolor('white')
for label in leg.get_texts():
label.set_fontsize('small') # the font size
for label in leg.get_lines():
label.set_linewidth(4) # the legend line width
if plotname:
fig.tight_layout(pad=0.2)
print("Plotting to '%s_zfz_relation.png'" % (plotname))
fig.savefig(plotname+"_zfz_relation.png", dpi=fig.dpi*5)
else:
plt.show()
# Plot the dM/dt-z relation for different masses (so always Mz=M0)
xarray = 10**(np.arange(0, 1, 0.05)) - 1
yval = 'dMdt'
# Specify the mass range
zarray = 10**np.arange(10, 14, 0.5)
xtitle = r"log$_{10}$ (1+z)"
ytitle = r"log$_{10}$ Accretion Rate M$_{sol}$ yr$^{-1}$"
linelabel = r"log$_{10}$ M$_z$(M$_{sol}$)="
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel(xtitle)
ax.set_ylabel(ytitle)
colors = cm.rainbow(np.linspace(0, 1, len(zarray)))
cosmo = commah.getcosmo(cosmology)
for zind, zval in enumerate(zarray):
output = commah.run(cosmology=cosmology, zi=xarray, Mi=zval,
com=False, mah=True)
yarray = output[yval].flatten()
# Plot each line in turn with different colour
ax.plot(np.log10(xarray+1.), np.log10(yarray),
label=linelabel+"{0:.1f}".format(np.log10(zval)),
color=colors[zind],)
# Plot the semi-analytic approximate formula from Correa et al 2015b
semianalytic_approx = 71.6 * (zval / 1e12) * (cosmo['h'] / 0.7) *\
(-0.24 + 0.75 * (xarray + 1)) * np.sqrt(
cosmo['omega_M_0'] * (xarray + 1)**3 + cosmo['omega_lambda_0'])
ax.plot(np.log10(xarray + 1), np.log10(semianalytic_approx),
color='black')
leg = ax.legend(loc=2)
# Make box totally transparent
leg.get_frame().set_alpha(0)
leg.get_frame().set_edgecolor('white')
for label in leg.get_texts():
label.set_fontsize('small') # the font size
for label in leg.get_lines():
label.set_linewidth(4) # the legend line width
if plotname:
fig.tight_layout(pad=0.2)
print("Plotting to '%s_dMdtz_relation.png'" % (plotname))
fig.savefig(plotname+"_dMdtz_relation.png", dpi=fig.dpi*5)
else:
plt.show()
# Plot the dMdt-M relation as a function of redshift
xarray = 10**(np.arange(10, 14, 0.5))
yval = 'dMdt'
# Specify the redshift range
zarray = np.arange(0, 5, 0.5)
xtitle = r"Halo Mass M$_{sol}$"
ytitle = r"Accretion Rate M$_{sol}$ yr$^{-1}$"
linelabel = "z="
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel(xtitle)
ax.set_ylabel(ytitle)
colors = cm.rainbow(np.linspace(0, 1, len(zarray)))
for zind, zval in enumerate(zarray):
output = commah.run(cosmology=cosmology, zi=zval, Mi=xarray,
com=False, mah=True)
yarray = output[yval].flatten()
# Plot each line in turn with different colour
ax.plot(xarray, yarray, label=linelabel+str(zval),
color=colors[zind],)
ax.set_xscale('log')
ax.set_yscale('log')
leg = ax.legend(loc=2)
# Make box totally transparent
leg.get_frame().set_alpha(0)
leg.get_frame().set_edgecolor('white')
for label in leg.get_texts():
label.set_fontsize('small') # the font size
for label in leg.get_lines():
label.set_linewidth(4) # the legend line width
if plotname:
fig.tight_layout(pad=0.2)
print("Plotting to '%s_MAH_M_relation.png'" % (plotname))
fig.savefig(plotname+"_MAH_M_relation.png", dpi=fig.dpi*5)
else:
plt.show()
# Plot the (dM/M)dt-M relation as a function of redshift
xarray = 10**(np.arange(10, 14, 0.5))
yval = 'dMdt'
# Specify the redshift range
zarray = np.arange(0, 5, 0.5)
xtitle = r"Halo Mass M$_{sol}$"
ytitle = r"Specific Accretion Rate yr$^{-1}$"
linelabel = "z="
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel(xtitle)
ax.set_ylabel(ytitle)
colors = cm.rainbow(np.linspace(0, 1, len(zarray)))
for zind, zval in enumerate(zarray):
output = commah.run(cosmology=cosmology, zi=zval, Mi=xarray,
mah=True, com=False)
yarray = output[yval].flatten()
# Plot each line in turn with different colour
ax.plot(xarray, yarray/xarray, label=linelabel+str(zval),
color=colors[zind],)
ax.set_xscale('log')
ax.set_yscale('log')
leg = ax.legend(loc=1)
# Make box totally transparent
leg.get_frame().set_alpha(0)
leg.get_frame().set_edgecolor('white')
for label in leg.get_texts():
label.set_fontsize('small') # the font size
for label in leg.get_lines():
label.set_linewidth(4) # the legend line width
if plotname:
fig.tight_layout(pad=0.2)
print("Plotting to '%s_specificMAH_M_relation.png'" % (plotname))
fig.savefig(plotname+"_specificMAH_M_relation.png", dpi=fig.dpi*5)
else:
plt.show()
# Plot the Mz-z relation as a function of mass
# (so mass is decreasing to zero as z-> inf)
xarray = 10**(np.arange(0, 1, 0.05)) - 1
yval = 'Mz'
# Specify the mass range
zarray = 10**np.arange(10, 14, 0.5)
xtitle = r"Redshift"
ytitle = r"M(z) (M$_{sol}$)"
linelabel = r"log$_{10}$ M$_{0}$(M$_{sol}$)="
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel(xtitle)
ax.set_ylabel(ytitle)
colors = cm.rainbow(np.linspace(0, 1, len(zarray)))
for zind, zval in enumerate(zarray):
output = commah.run(cosmology=cosmology, zi=0, Mi=zval, z=xarray)
yarray = output[yval].flatten()
# Plot each line in turn with different colour
ax.plot(xarray, yarray,
label=linelabel+"{0:.1f}".format(np.log10(zval)),
color=colors[zind],)
ax.set_yscale('log')
leg = ax.legend(loc=1)
# Make box totally transparent
leg.get_frame().set_alpha(0)
leg.get_frame().set_edgecolor('white')
for label in leg.get_texts():
label.set_fontsize('small') # the font size
for label in leg.get_lines():
label.set_linewidth(4) # the legend line width
if plotname:
fig.tight_layout(pad=0.2)
print("Plotting to '%s_Mzz_relation.png'" % (plotname))
fig.savefig(plotname+"_Mzz_relation.png", dpi=fig.dpi*5)
else:
plt.show()
# Plot the Mz/M0-z relation as a function of mass
xarray = 10**(np.arange(0, 1, 0.02)) - 1
yval = 'Mz'
# Specify the mass range
zarray = 10**np.arange(10, 14, 0.5)
xtitle = r"Redshift"
ytitle = r"log$_{10}$ M(z)/M$_{0}$"
linelabel = r"log$_{10}$ M$_{0}$(M$_{sol}$)="
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel(xtitle)
ax.set_ylabel(ytitle)
colors = cm.rainbow(np.linspace(0, 1, len(zarray)))
for zind, zval in enumerate(zarray):
output = commah.run(cosmology=cosmology, zi=0, Mi=zval, z=xarray)
yarray = output[yval].flatten()
# Plot each line in turn with different colour
ax.plot(xarray, np.log10(yarray/zval),
label=linelabel+"{0:.1f}".format(np.log10(zval)),
color=colors[zind],)
leg = ax.legend(loc=3)
# Make box totally transparent
leg.get_frame().set_alpha(0)
leg.get_frame().set_edgecolor('white')
for label in leg.get_texts():
label.set_fontsize('small') # the font size
for label in leg.get_lines():
label.set_linewidth(4) # the legend line width
if plotname:
fig.tight_layout(pad=0.2)
print("Plotting to '%s_MzM0z_relation.png'" % (plotname))
fig.savefig(plotname+"_MzM0z_relation.png", dpi=fig.dpi*5)
else:
plt.show()
return("Done") | [
"def howPlotAsk(goodFormat):\n '''plots using inquirer prompts\n\n Arguments:\n goodFormat {dict} -- module : [results for module]\n '''\n plotAnswer = askPlot()\n if \"Save\" in plotAnswer['plotQ']:\n exportPlotsPath = pathlib.Path(askSave())\n if \"Show\" in plotAnswer['plotQ']... | [
0.7343975305557251,
0.7127325534820557,
0.7039694786071777,
0.6969481110572815,
0.6965529322624207,
0.6965466141700745,
0.6953907012939453,
0.692695140838623,
0.6920647621154785,
0.6920480132102966,
0.691997766494751,
0.6880983710289001
] |
Function enhance
Enhance the object with new item or enhanced items | def enhance(self):
""" Function enhance
Enhance the object with new item or enhanced items
"""
self.update({'puppetclasses':
SubDict(self.api, self.objName,
self.payloadObj, self.key,
SubItemPuppetClasses)})
self.update({'parameters':
SubDict(self.api, self.objName,
self.payloadObj, self.key,
SubItemParameter)})
self.update({'smart_class_parameters':
SubDict(self.api, self.objName,
self.payloadObj, self.key,
ItemSmartClassParameter)}) | [
"def enhance(self):\n \"\"\" Function enhance\n Enhance the object with new item or enhanced items\n \"\"\"\n self.update({'os_default_templates':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n ... | [
0.8752948045730591,
0.8729672431945801,
0.8696154356002808,
0.8695272207260132,
0.8679497838020325,
0.8407313823699951,
0.7447202205657959,
0.7079142928123474,
0.707258939743042,
0.6839107275009155,
0.6810131072998047,
0.6799886226654053
] |
Extract the transformers names from a line of code of the form
from __experimental__ import transformer1 [,...]
and adds them to the globally known dict | def add_transformers(line):
'''Extract the transformers names from a line of code of the form
from __experimental__ import transformer1 [,...]
and adds them to the globally known dict
'''
assert FROM_EXPERIMENTAL.match(line)
line = FROM_EXPERIMENTAL.sub(' ', line)
# we now have: " transformer1 [,...]"
line = line.split("#")[0] # remove any end of line comments
# and insert each transformer as an item in a list
for trans in line.replace(' ', '').split(','):
import_transformer(trans) | [
"def extract_transformers_from_source(source):\n '''Scan a source for lines of the form\n from __experimental__ import transformer1 [,...]\n identifying transformers to be used. Such line is passed to the\n add_transformer function, after which it is removed from the\n code to be executed... | [
0.8757690787315369,
0.735373854637146,
0.6851067543029785,
0.6705670356750488,
0.6693321466445923,
0.6668049693107605,
0.6660348773002625,
0.6654304265975952,
0.6635963916778564,
0.6592937707901001,
0.6576693058013916,
0.6569241285324097
] |
If needed, import a transformer, and adds it to the globally known dict
The code inside a module where a transformer is defined should be
standard Python code, which does not need any transformation.
So, we disable the import hook, and let the normal module import
do its job - which is faster and likely more reliable than our
custom method. | def import_transformer(name):
'''If needed, import a transformer, and adds it to the globally known dict
The code inside a module where a transformer is defined should be
standard Python code, which does not need any transformation.
So, we disable the import hook, and let the normal module import
do its job - which is faster and likely more reliable than our
custom method.
'''
if name in transformers:
return transformers[name]
# We are adding a transformer built from normal/standard Python code.
# As we are not performing transformations, we temporarily disable
# our import hook, both to avoid potential problems AND because we
# found that this resulted in much faster code.
hook = sys.meta_path[0]
sys.meta_path = sys.meta_path[1:]
try:
transformers[name] = __import__(name)
# Some transformers are not allowed in the console.
# If an attempt is made to activate one of them in the console,
# we replace it by a transformer that does nothing and print a
# message specific to that transformer as written in its module.
if CONSOLE_ACTIVE:
if hasattr(transformers[name], "NO_CONSOLE"):
print(transformers[name].NO_CONSOLE)
transformers[name] = NullTransformer()
except ImportError:
sys.stderr.write("Warning: Import Error in add_transformers: %s not found\n" % name)
transformers[name] = NullTransformer()
except Exception as e:
sys.stderr.write("Unexpected exception in transforms.import_transformer%s\n " %
e.__class__.__name__)
finally:
sys.meta_path.insert(0, hook) # restore import hook
return transformers[name] | [
"def add_transformers(line):\n '''Extract the transformers names from a line of code of the form\n from __experimental__ import transformer1 [,...]\n and adds them to the globally known dict\n '''\n assert FROM_EXPERIMENTAL.match(line)\n\n line = FROM_EXPERIMENTAL.sub(' ', line)\n # we no... | [
0.740732729434967,
0.7014108300209045,
0.6842485070228577,
0.6801718473434448,
0.6798920035362244,
0.6796746253967285,
0.6788545250892639,
0.6783643364906311,
0.6754996180534363,
0.6742110848426819,
0.6730639934539795,
0.6721809506416321
] |
Scan a source for lines of the form
from __experimental__ import transformer1 [,...]
identifying transformers to be used. Such line is passed to the
add_transformer function, after which it is removed from the
code to be executed. | def extract_transformers_from_source(source):
'''Scan a source for lines of the form
from __experimental__ import transformer1 [,...]
identifying transformers to be used. Such line is passed to the
add_transformer function, after which it is removed from the
code to be executed.
'''
lines = source.split('\n')
linenumbers = []
for number, line in enumerate(lines):
if FROM_EXPERIMENTAL.match(line):
add_transformers(line)
linenumbers.insert(0, number)
# drop the "fake" import from the source code
for number in linenumbers:
del lines[number]
return '\n'.join(lines) | [
"def add_transformers(line):\n '''Extract the transformers names from a line of code of the form\n from __experimental__ import transformer1 [,...]\n and adds them to the globally known dict\n '''\n assert FROM_EXPERIMENTAL.match(line)\n\n line = FROM_EXPERIMENTAL.sub(' ', line)\n # we no... | [
0.8276940584182739,
0.7044193744659424,
0.6995483040809631,
0.6984575390815735,
0.6789594888687134,
0.6653650999069214,
0.660295844078064,
0.6600147485733032,
0.659911572933197,
0.6583966612815857,
0.6572274565696716,
0.6546871662139893
] |
This function should be called from the console, when it starts.
Some transformers are not allowed in the console and they could have
been loaded prior to the console being activated. We effectively remove them
and print an information message specific to that transformer
as written in the transformer module. | def remove_not_allowed_in_console():
'''This function should be called from the console, when it starts.
Some transformers are not allowed in the console and they could have
been loaded prior to the console being activated. We effectively remove them
and print an information message specific to that transformer
as written in the transformer module.
'''
not_allowed_in_console = []
if CONSOLE_ACTIVE:
for name in transformers:
tr_module = import_transformer(name)
if hasattr(tr_module, "NO_CONSOLE"):
not_allowed_in_console.append((name, tr_module))
for name, tr_module in not_allowed_in_console:
print(tr_module.NO_CONSOLE)
# Note: we do not remove them, so as to avoid seeing the
# information message displayed again if an attempt is
# made to re-import them from a console instruction.
transformers[name] = NullTransformer() | [
"private static synchronized void addTransformer() {\n if (detailedTransformTrace && tc.isEntryEnabled())\n Tr.entry(tc, \"addTransformer\");\n\n if (registeredTransformer == null && instrumentation != null) {\n registeredTransformer = new LibertyRuntimeTransformer();\n ... | [
0.7023383378982544,
0.6878604292869568,
0.6830065846443176,
0.6781601905822754,
0.6752806901931763,
0.6651531457901001,
0.6632915139198303,
0.6570521593093872,
0.6551002860069275,
0.6537941694259644,
0.6495481133460999,
0.6486765742301941
] |
Used to convert the source code, making use of known transformers.
"transformers" are modules which must contain a function
transform_source(source)
which returns a tranformed source.
Some transformers (for example, those found in the standard library
module lib2to3) cannot cope with non-standard syntax; as a result, they
may fail during a first attempt. We keep track of all failing
transformers and keep retrying them until either they all succeeded
or a fixed set of them fails twice in a row. | def transform(source):
'''Used to convert the source code, making use of known transformers.
"transformers" are modules which must contain a function
transform_source(source)
which returns a tranformed source.
Some transformers (for example, those found in the standard library
module lib2to3) cannot cope with non-standard syntax; as a result, they
may fail during a first attempt. We keep track of all failing
transformers and keep retrying them until either they all succeeded
or a fixed set of them fails twice in a row.
'''
source = extract_transformers_from_source(source)
# Some transformer fail when multiple non-Python constructs
# are present. So, we loop multiple times keeping track of
# which transformations have been unsuccessfully performed.
not_done = transformers
while True:
failed = {}
for name in not_done:
tr_module = import_transformer(name)
try:
source = tr_module.transform_source(source)
except Exception as e:
failed[name] = tr_module
# from traceback import print_exc
# print("Unexpected exception in transforms.transform",
# e.__class__.__name__)
# print_exc()
if not failed:
break
# Insanity is doing the same Tting over and overaAgain and
# expecting different results ...
# If the exact same set of transformations are not performed
# twice in a row, there is no point in trying out a third time.
if failed == not_done:
print("Warning: the following transforms could not be done:")
for key in failed:
print(key)
break
not_done = failed # attempt another pass
return source | [
"def extract_transformers_from_source(source):\n '''Scan a source for lines of the form\n from __experimental__ import transformer1 [,...]\n identifying transformers to be used. Such line is passed to the\n add_transformer function, after which it is removed from the\n code to be executed... | [
0.7585968971252441,
0.7482191920280457,
0.7213826179504395,
0.7133317589759827,
0.7072563767433167,
0.7071275115013123,
0.7033327221870422,
0.7027499079704285,
0.7023561596870422,
0.7019122242927551,
0.7003361582756042,
0.7002484202384949
] |
Match all requests/responses that satisfy the following conditions:
* An Admin App; i.e. the path is something like /admin/some_app/
* The ``include_flag`` is not in the response's content | def _match(self, request, response):
"""Match all requests/responses that satisfy the following conditions:
* An Admin App; i.e. the path is something like /admin/some_app/
* The ``include_flag`` is not in the response's content
"""
is_html = 'text/html' in response.get('Content-Type', '')
if is_html and hasattr(response, 'rendered_content'):
correct_path = PATH_MATCHER.match(request.path) is not None
not_included = self.include_flag not in response.rendered_content
return correct_path and not_included
return False | [
"def _is_request_in_include_path(self, request):\n \"\"\"Check if the request path is in the `_include_paths` list.\n\n If no specific include paths are given then we assume that\n authentication is required for all paths.\n\n \"\"\"\n if self._include_paths:\n for path... | [
0.6703752875328064,
0.6700488924980164,
0.6664093136787415,
0.6612303853034973,
0.6594433188438416,
0.654962956905365,
0.6497297883033752,
0.649634599685669,
0.6488224267959595,
0.6488022208213806,
0.6486542224884033,
0.6471434235572815
] |
Read the minified CSS file including STATIC_URL in the references
to the sprite images. | def _chosen_css(self):
"""Read the minified CSS file including STATIC_URL in the references
to the sprite images."""
css = render_to_string(self.css_template, {})
for sprite in self.chosen_sprites: # rewrite path to sprites in the css
css = css.replace(sprite, settings.STATIC_URL + "img/" + sprite)
return css | [
"def static(request):\n \"Shorthand static URLs. In debug mode, the JavaScript is not minified.\"\n static_url = settings.STATIC_URL\n prefix = 'src' if settings.DEBUG else 'min'\n return {\n 'CSS_URL': os.path.join(static_url, 'stylesheets/css'),\n 'IMAGES_URL': os.path.join(static_url, '... | [
0.7316712737083435,
0.6827130913734436,
0.6762562394142151,
0.674936830997467,
0.6738751530647278,
0.6731905937194824,
0.6721377968788147,
0.6718498468399048,
0.6640922427177429,
0.6606653332710266,
0.659768283367157,
0.6592134833335876
] |
Embed Chosen.js directly in html of the response. | def _embed(self, request, response):
"""Embed Chosen.js directly in html of the response."""
if self._match(request, response):
# Render the <link> and the <script> tags to include Chosen.
head = render_to_string(
"chosenadmin/_head_css.html",
{"chosen_css": self._chosen_css()}
)
body = render_to_string(
"chosenadmin/_script.html",
{"chosen_js": self._chosen_js()}
)
# Re-write the Response's content to include our new html
content = response.rendered_content
content = content.replace('</head>', head)
content = content.replace('</body>', body)
response.content = content
return response | [
"function (data) {\n $.ajax({\n url: trumbowyg.o.plugins.noembed.proxy,\n type: 'GET',\n data: data,\n cache: false,\n ... | [
0.6733248829841614,
0.6718196868896484,
0.6667999625205994,
0.6569007635116577,
0.6564831137657166,
0.6560764312744141,
0.6543261408805847,
0.6530284285545349,
0.6528116464614868,
0.6516057848930359,
0.6515606045722961,
0.6499364376068115
] |
Close the I2C bus | def clean_up(self):
"""
Close the I2C bus
"""
self.log.debug("Closing I2C bus for address: 0x%02X" % self.address)
self.bus.close() | [
"def close(self):\n \"\"\"\n Close the i2c connection.\n \"\"\"\n if self.fd:\n os.close(self.fd)\n self.fd = None",
"func (conn *I2CConnection) Close() error {\n\tglog.V(2).Info(\"hd44780: closing I2C bus\")\n\treturn conn.I2C.Close()\n}",
"def close(self):\n ... | [
0.8122109770774841,
0.7482624053955078,
0.7416961789131165,
0.7368502020835876,
0.731259286403656,
0.728217601776123,
0.7252492904663086,
0.7216896414756775,
0.717282772064209,
0.7143097519874573,
0.7116347551345825,
0.711581826210022
] |
Send only the read / write bit | def write_quick(self):
"""
Send only the read / write bit
"""
self.bus.write_quick(self.address)
self.log.debug("write_quick: Sent the read / write bit") | [
"def write(self, b):\n '''write some bytes'''\n from . import mavutil\n self.debug(\"sending '%s' (0x%02x) of len %u\\n\" % (b, ord(b[0]), len(b)), 2)\n while len(b) > 0:\n n = len(b)\n if n > 70:\n ... | [
0.7094565033912659,
0.6987737417221069,
0.6941272020339966,
0.6860069632530212,
0.6829076409339905,
0.6804078221321106,
0.6793176531791687,
0.6791053414344788,
0.6791037917137146,
0.6784651875495911,
0.6781317591667175,
0.6769970655441284
] |
Writes an 8-bit byte to the specified command register | def write_byte(self, cmd, value):
"""
Writes an 8-bit byte to the specified command register
"""
self.bus.write_byte_data(self.address, cmd, value)
self.log.debug(
"write_byte: Wrote 0x%02X to command register 0x%02X" % (
value, cmd
)
) | [
"def _write8(self, reg, value):\n \"\"\"Write a 8-bit value to a register.\"\"\"\n self.i2c.write8(TCS34725_COMMAND_BIT | reg, value)",
"def _write8(self, reg, value):\n \"\"\"Write a 8-bit value to a register.\"\"\"\n self._device.write8(TCS34725_COMMAND_BIT | reg, value)",
"def wri... | [
0.8645734190940857,
0.8588345646858215,
0.7863130569458008,
0.7781301736831665,
0.7779532074928284,
0.7688304781913757,
0.7631951570510864,
0.7490862011909485,
0.7449775338172913,
0.7423670291900635,
0.7418322563171387,
0.7391216158866882
] |
Writes a 16-bit word to the specified command register | def write_word(self, cmd, value):
"""
Writes a 16-bit word to the specified command register
"""
self.bus.write_word_data(self.address, cmd, value)
self.log.debug(
"write_word: Wrote 0x%04X to command register 0x%02X" % (
value, cmd
)
) | [
"def write_byte(self, cmd, value):\n \"\"\"\n Writes an 8-bit byte to the specified command register\n \"\"\"\n self.bus.write_byte_data(self.address, cmd, value)\n self.log.debug(\n \"write_byte: Wrote 0x%02X to command register 0x%02X\" % (\n value, cmd... | [
0.7869971394538879,
0.777798593044281,
0.7590028047561646,
0.7558252215385437,
0.7445629835128784,
0.744454562664032,
0.7431163191795349,
0.7430921196937561,
0.7393445372581482,
0.7382387518882751,
0.7364676594734192,
0.7359804511070251
] |
Writes an 8-bit byte directly to the bus | def write_raw_byte(self, value):
"""
Writes an 8-bit byte directly to the bus
"""
self.bus.write_byte(self.address, value)
self.log.debug("write_raw_byte: Wrote 0x%02X" % value) | [
"def write_byte(self, cmd, value):\n \"\"\"\n Writes an 8-bit byte to the specified command register\n \"\"\"\n self.bus.write_byte_data(self.address, cmd, value)\n self.log.debug(\n \"write_byte: Wrote 0x%02X to command register 0x%02X\" % (\n value, cmd... | [
0.824142575263977,
0.7746727466583252,
0.7695673108100891,
0.7655920386314392,
0.7634241580963135,
0.760619580745697,
0.7577587962150574,
0.7562206983566284,
0.7498108744621277,
0.7486347556114197,
0.7472392916679382,
0.7456278800964355
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.