Notes for my personal use:
import scipy.optimize
help(scipy.optimize)
import numpy as np
names = np.array(['Jim', 'Luke', 'Josh', 'Pete'])
# np.vectorize transforms a function that is not numpy-aware (e.g. takes a float and returns a float)
# into one that operates on (and returns) numpy arrays.
first_letter_j = np.vectorize(lambda s: s[0])(names) == 'J'
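#quick check of what the mask selects (a sketch; expected output shown as a comment)
names[first_letter_j]  # array(['Jim', 'Josh'], dtype='<U4')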
def dispatch_dict(operator, x, y):
    return {
        'add': lambda: x + y,
        'sub': lambda: x - y,
        'mul': lambda: x * y,
        'div': lambda: x / y,
    }.get(operator, lambda: None)()
dispatch_dict('add', 1, 2)
cities = ['mmarent', 'amstardan', 'dfs', 'new york']
smallest, *rest, largest = cities  # extended unpacking: first item, middle items as a list, last item
#file reading: process each line one by one
f = open('pandora.txt')
text = f.read()
for line in text.split('\n'):
    print(line)
f.close()
#the above is bad because we don't need to manually read the whole file and split it ourselves
#good way
f = open('pandora.txt')
for line in f:
    print(line)
f.close()
#even better: use a with statement, no need to bother with cleanup
with open('pandora.txt') as f:
    for line in f:
        print(line)
#escape the quotes
id_query = ' and fds.FS_ENTITY_ID in (\'%s\')' % ('\', \''.join(str(id) for id in missingIdList))
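#self-contained sketch of the result, assuming missingIdList = [1, 2, 3]
missingIdList = [1, 2, 3]
print(' and fds.FS_ENTITY_ID in (\'%s\')' % ('\', \''.join(str(id) for id in missingIdList)))
# and fds.FS_ENTITY_ID in ('1', '2', '3')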
#need to escape special regex characters (for patterns longer than one character)
dollars.str.replace(r'-\$', '-', regex=True)
#modify column names (strip whitespace here)
info = info.rename(columns=lambda x: x.strip())
# strings: strip whitespace, join; lists: insert an element at a given index
words.insert(0, w)
words.insert(5, w)
#ValueError usage
if retries < 0:
    raise ValueError('invalid user response')
#break a long list down into short chunks
l = list(range(1, 1000))
def chunks(l, n):
    '''Yield successive n-sized chunks from l.'''
    for i in range(0, len(l), n):
        yield l[i:i + n]
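#usage sketch for chunks() above: pull the first chunk off the generator
first_chunk = next(chunks(l, 100))
print(first_chunk[:3], len(first_chunk))  # [1, 2, 3] 100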
#"if not" usage
def all(iterable):
    for element in iterable:
        if not element:
            return False
    return True
# sort a list (not only sort_values on a DataFrame)
>>> testlist = [2, 4, 1, 3]
>>> sorted(testlist, reverse=True)
[4, 3, 2, 1]
#sort based on how frequently each word occurred
import operator
for key, value in sorted(word_count.items(), key=operator.itemgetter(1)):
    print(key, value)
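#word_count above is assumed to be a dict of word -> count; a minimal way to build one is collections.Counter
from collections import Counter
word_count = Counter('the cat sat on the mat the end'.split())
print(word_count['the'])          # 3
print(word_count.most_common(1))  # [('the', 3)]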
stocks = {
    'GOOG': 520.45,
    'FB': 76.45,
    'YHOO': 39.45,
    'AMZN': 306.78,
    'AAPL': 99.76,
}
print(min(zip(stocks.values(), stocks.keys())))     # cheapest stock: (39.45, 'YHOO')
print(sorted(zip(stocks.values(), stocks.keys())))  # sorted by price
print(sorted(zip(stocks.keys(), stocks.values())))  # sorted by ticker
sorted([5, 2, 3, 1, 4])  # returns a new sorted list
>>> a = [5, 2, 3, 1, 4]
>>> a.sort()  # sorts in place and returns None
>>> sorted({1: 'D', 2: 'B', 3: 'B', 4: 'E', 5: 'A'})  # sorting a dict sorts its keys
[1, 2, 3, 4, 5]
>>> sorted('This is a test string from Andrew'.split(), key=str.lower)
['a', 'Andrew', 'from', 'is', 'string', 'test', 'This']
dicttest = {1: 'D', 2: 'B', 3: 'B', 4: 'E', 5: 'A'}
dicttest.keys()
sorted(dicttest)           # sorted keys
sorted(dicttest.values())  # sorted values
#getattr usage: getattr(object, attribute, default)
class Person:
    name = "John"
    age = 36
    country = "Norway"
x = getattr(Person, 'page', 'my message')  # 'my message' (no 'page' attribute, so the default is returned)
getattr(Person, 'name', 'my message')  # 'John'
#related: delattr() removes an attribute, hasattr() checks whether an attribute exists,
#and setattr() sets the value of an attribute
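#a minimal sketch of those helpers, reusing the Person class above
setattr(Person, 'page', 42)     # add/overwrite an attribute
print(hasattr(Person, 'page'))  # True
delattr(Person, 'page')         # remove it again
print(hasattr(Person, 'page'))  # False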
#conditions
if (not status_check.empty) and (status_check['end_ts'].iloc[0] is None):
    prediction_ts = status_check['prediction_ts'].iloc[0]
def greet_me(**kwargs):
    if kwargs:  # kwargs is always a dict, so check for emptiness rather than None
        for key, value in kwargs.items():
            print('%s == %s' % (key, value))
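#usage sketch with made-up keyword arguments
greet_me(first='John', last='Doe')  # first == John / last == Doe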
#pandas reindexing
df.reindex_like(df2)                               # conform df to df2's index/columns
ts2.reindex(ts.index, method='ffill')              # forward-fill while reindexing
ts2.reindex(ts.index).fillna(method='ffill')       # reindex first, then forward-fill
ts2.reindex(ts.index).interpolate(method='ffill')  # similar idea via interpolate
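#a self-contained sketch of reindex + forward fill (ts/ts2 above are assumed series; a plain integer index is used here)
import pandas as pd
ts = pd.Series([1.0, 2.0, 3.0], index=[0, 2, 4])
ts.reindex(range(6), method='ffill')
# 0 -> 1.0, 1 -> 1.0, 2 -> 2.0, 3 -> 2.0, 4 -> 3.0, 5 -> 3.0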
reset_index()           # moves the current index into a column and sets a fresh integer index
reset_index(drop=True)  # drops the current index of the DataFrame and replaces it with an index of increasing integers
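#minimal sketch of the difference (uses the pandas import above)
df = pd.DataFrame({'val': [10, 20]}, index=['a', 'b'])
df.reset_index()           # keeps 'a', 'b' as a new 'index' column
df.reset_index(drop=True)  # just 'val', with index 0, 1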