    fout.write(target)  # tail of an earlier `with open(...) as fout:` block; the fragment starts mid-file
# Prepare input pickle files
input_file_names = []
output_file_names = []
for i, job in enumerate(jobs):
    filename = '{}/input-{}_{}.pickle'.format(path, suffix, i)
    input_file_names.append(filename)
    for j in range(n):
        output_file_names.append('output-{}_{}.{}.pickle'.format(suffix, i, j + 1))
    with open(filename, mode='wb') as fout:
        pickle.dump(job, fout, protocol=2)
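# A minimal worker-side sketch (an assumption, not shown in this fragment):
# each task loads one input pickle, runs it, and writes the matching output.
def run_task(input_path, output_path):
    with open(input_path, mode='rb') as fin:
        job = pickle.load(fin)  # protocol=2 pickles load on both Python 2 and 3
    result = job()  # hypothetical: assumes each job object is callable
    with open(output_path, mode='wb') as fout:
        pickle.dump(result, fout, protocol=2)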
# Create the blob client, for use in obtaining references to
# blob storage containers and uploading files to containers.
blob_client = azureblob.BlockBlobService(
    account_name=_STORAGE_ACCOUNT_NAME,
    account_key=_STORAGE_ACCOUNT_KEY)
n_jobs = -(-(len(jobs) * n) // nproc)  # ceiling division with ints: -(-a // b) == ceil(a / b)
_log.info('{} jobs will be created.'.format(n_jobs))
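# Worked example: len(jobs) * n == 7 tasks across nproc == 2 workers gives
# -(-7 // 2) == 4, matching math.ceil(7 / 2).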
res = None
try:
    # Use the blob client to create the containers in Azure Storage if they
    # don't yet exist.
    app_container_name = 'application-{}'.format(suffix)
    input_container_name = 'input-{}'.format(suffix)
    output_container_name = 'output-{}'.format(suffix)
    # app_container_name = 'application'
    # input_container_name = 'input'
    # output_container_name = 'output'
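    # The create calls themselves are not shown in this fragment; a minimal
    # sketch with the legacy SDK (fail_on_exist=False makes the call idempotent):
    for name in (app_container_name, input_container_name, output_container_name):
        blob_client.create_container(name, fail_on_exist=False)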
def setUp(self):
    super(StorageBlobEncryptionTest, self).setUp()
    self.bbs = self._create_storage_service(BlockBlobService, self.settings)
    self.pbs = self._create_storage_service(PageBlobService, self.settings)
    self.service_dict = {'block_blob': self.bbs,
                         'page_blob': self.pbs}
    self.container_name = self.get_resource_name('utcontainer')
    self.bytes = b'Foo'
    if not self.is_playback():
        self.bbs.create_container(self.container_name)
    # Shrink the transfer thresholds so the tests hit the chunked code paths
    # with small payloads.
    self.bbs.MAX_BLOCK_SIZE = 4 * 1024
    self.bbs.MAX_SINGLE_PUT_SIZE = 32 * 1024
    self.pbs.MAX_PAGE_SIZE = 4 * 1024
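    # A minimal sketch of a test exercising the chunked upload path under the
    # lowered limits above (create_blob_from_bytes is the legacy upload API; the
    # 64 KiB payload exceeds MAX_SINGLE_PUT_SIZE, forcing block-by-block upload):
    def test_put_blob_chunked(self):
        data = b'a' * (64 * 1024)
        self.bbs.create_blob_from_bytes(self.container_name, 'chunkedblob', data)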
try:
    # for azure-storage-blob>=12:
    from azure.storage.blob import BlobServiceClient
    from azure.core.exceptions import AzureError
    s = BlobServiceClient.from_connection_string(conn_string)
    try:
        s.delete_container(container)
    except AzureError as ex:
        # ignore the ContainerNotFound error:
        if ex.error_code != 'ContainerNotFound':
            raise
except ImportError:
    # for azure-storage-blob<12
    from azure.storage.blob import BlockBlobService
    s = BlockBlobService(connection_string=conn_string)
    s.delete_container(container)
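# Under azure-storage-blob>=12 the same intent can be written with the
# narrower exception type instead of inspecting error_code (a sketch):
from azure.core.exceptions import ResourceNotFoundError

try:
    s.delete_container(container)
except ResourceNotFoundError:
    pass  # container was already gone; nothing to do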
def get_azure_credentials(all_credentials):
    """Return the subscription_id and credentials for Azure.

    Takes a dict keyed by cloud name, expected to be formatted like
    cloud-city's credentials.
    """
    azure_dict = all_credentials['azure']['credentials']
    subscription_id = azure_dict['subscription-id']
    return subscription_id, ServicePrincipalCredentials(
        client_id=azure_dict['application-id'],
        secret=azure_dict['application-password'],
        tenant=azure_dict['tenant-id'],
        subscription_id=azure_dict['subscription-id'],
    )
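# A hedged usage sketch (assumes the classic credential object is paired with
# an old-style management client; ResourceManagementClient is illustrative):
from azure.mgmt.resource import ResourceManagementClient

subscription_id, credentials = get_azure_credentials(all_credentials)
resource_client = ResourceManagementClient(credentials, subscription_id)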
        url = LogDownloaderURL.format(
            ACCOUNT_NAME=conn_string_dict['AccountName'],
            ACCOUNT_KEY=conn_string_dict['AccountKey'].replace('+', '%2b'),  # '+' must be percent-encoded in the URL
            CONTAINER=container,
            START_DATE=start_date.strftime("%Y-%m-%d"),
            END_DATE=(end_date + datetime.timedelta(days=1)).strftime("%Y-%m-%d"))
        r = requests.post(url)
        with open(output_fp, 'wb') as fout:  # use a context manager so the file is closed even on error
            fout.write(r.content)
        print(' Done!\n')
    except Exception as e:
        print('Error: {}'.format(e))
else:  # using the BlockBlobService python api for cooked logs
    try:
        print('Establishing Azure Storage BlockBlobService connection using ', end='')
        if sas_token and account_name:
            print('sas token...')
            bbs = BlockBlobService(account_name=account_name, sas_token=sas_token)
        else:
            print('connection string...')
            bbs = BlockBlobService(connection_string=conn_string)
        # List all blobs and download them one by one
        print('Getting blobs list...')
        blobs = bbs.list_blobs(container)
    except Exception as e:
        if isinstance(e.args[0], str) and e.args[0].startswith('The specified container does not exist.'):
            print("Error: The specified container ({}) does not exist.".format(container))
        else:
            print("Error:\nType: {}\nArgs: {}".format(type(e).__name__, e.args))
        sys.exit()
    print('Iterating through blobs...\n')
    selected_fps = []
    for blob in blobs:
        if '/data/' not in blob.name:
            if verbose:
                print('{} - Skip: Non-data blob\n'.format(blob.name))
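            continue  # assumption: the original loop skips non-data blobs here
        # A minimal download sketch for blobs that pass the filter above
        # (get_blob_to_path is the legacy API; output_dir is a hypothetical
        # destination directory, and os is assumed to be imported):
        local_fp = os.path.join(output_dir, blob.name.replace('/', '_'))
        bbs.get_blob_to_path(container, blob.name, local_fp)
        selected_fps.append(local_fp)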
def update_cluster_durability(cmd, client, resource_group_name, cluster_name, node_type, durability_level):
    cli_ctx = cmd.cli_ctx
    cluster = client.get(resource_group_name, cluster_name)
    node_types = [n for n in cluster.node_types if n.name.lower() == node_type.lower()]
    if not node_types:  # a list comprehension yields [], never None
        raise CLIError("Failed to find the node type in the cluster")
    compute_client = compute_client_factory(cli_ctx)
    vmss = compute_client.virtual_machine_scale_sets.get(
        resource_group_name, node_type)
    fabric_exts = [ext for ext in vmss.virtual_machine_profile.extension_profile.extensions
                   if ext.type.lower() == SERVICE_FABRIC_WINDOWS_NODE_EXT_NAME or
                   ext.type.lower() == SERVICE_FABRIC_LINUX_NODE_EXT_NAME]
    if not fabric_exts:  # same fix: check for an empty list, not None
        raise CLIError("Failed to find the service fabric extension")
    fabric_ext = fabric_exts[0]
    if fabric_ext.settings['durabilityLevel'] == durability_level:
        return cluster
    fabric_ext.settings['durabilityLevel'] = durability_level
    fabric_ext.settings['enableParallelJobs'] = True
    vmss_poll = compute_client.virtual_machine_scale_sets.create_or_update(
        resource_group_name, vmss.name, vmss)
    node_type = node_types[0]
    node_type.durability_level = durability_level
    patch_request = ClusterUpdateParameters(node_types=node_types)
    update_cluster_poll = client.update(
        resource_group_name, cluster_name, patch_request)
    def wait(ctx, poller):
        return LongRunningOperation(ctx)(poller)
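    # A sketch of how the helper is presumably used (the fragment ends before
    # the calls): drain the VMSS poller first, then the cluster update poller.
    wait(cli_ctx, vmss_poll)
    return wait(cli_ctx, update_cluster_poll)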
    subnet_resource_id=(subnet_resource_id
                        if is_valid_resource_id(subnet_resource_id) else None))  # tail of an earlier constructor call; the fragment starts mid-expression

# Create the load balancer configuration
load_balancer_object = LoadBalancerConfiguration(
    private_ip_address=private_ip_object,
    public_ip_address_resource_id=public_ip_address_resource_id,
    load_balancer_resource_id=load_balancer_resource_id,
    probe_port=probe_port,
    sql_virtual_machine_instances=sql_virtual_machine_instances)

# Create the availability group listener object
ag_listener_object = AvailabilityGroupListener(
    availability_group_name=availability_group_name,
    load_balancer_configurations=[load_balancer_object],
    port=port)

# sdk_no_wait(False, ...) invokes the operation immediately and returns a
# poller, which LongRunningOperation then blocks on.
LongRunningOperation(cmd.cli_ctx)(sdk_no_wait(False, client.create_or_update, resource_group_name,
                                              sql_virtual_machine_group_name, availability_group_listener_name,
                                              ag_listener_object))
return client.get(resource_group_name, sql_virtual_machine_group_name, availability_group_listener_name)
nsg = LongRunningOperation(cmd.cli_ctx, 'Creating network security group')(  # reconstructed opening; the fragment began mid-call and `nsg` is used below
    network.network_security_groups.create_or_update(
        resource_group_name=resource_group_name,
        network_security_group_name=network_security_group_name,
        parameters=NetworkSecurityGroup(location=location)
    )
)
created_nsg = True

# update the NSG with the new rule to allow inbound traffic
SecurityRule = cmd.get_models('SecurityRule', resource_type=ResourceType.MGMT_NETWORK)
rule_name = 'open-port-all' if port == '*' else 'open-port-{}'.format(port)
rule = SecurityRule(protocol='*', access='allow', direction='inbound', name=rule_name,
                    source_port_range='*', destination_port_range=port, priority=priority,
                    source_address_prefix='*', destination_address_prefix='*')
nsg_name = nsg.name or os.path.split(nsg.id)[1]
LongRunningOperation(cmd.cli_ctx, 'Adding security rule')(
    network.security_rules.create_or_update(
        resource_group_name, nsg_name, rule_name, rule)
)

# update the NIC or subnet if a new NSG was created
if created_nsg and not apply_to_subnet:
    nic.network_security_group = nsg
    LongRunningOperation(cmd.cli_ctx, 'Updating NIC')(network.network_interfaces.create_or_update(
        resource_group_name, nic.name, nic))
elif created_nsg and apply_to_subnet:
    subnet.network_security_group = nsg
    LongRunningOperation(cmd.cli_ctx, 'Updating subnet')(network.subnets.create_or_update(
        resource_group_name=resource_group_name,
        virtual_network_name=subnet_id['name'],
        subnet_name=subnet_id['child_name_1'],
        subnet_parameters=subnet))  # closing parens restored; the original fragment left the call unterminated
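# Design note (from the branches above): attaching the NSG to the NIC scopes
# the new rule to a single VM, while attaching it to the subnet opens the port
# for every VM on that subnet; apply_to_subnet selects between the two.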
    deployment_name: The name of the deployment.
    role_name: The name of the role.
    post_capture_action:
        Specifies the action after the capture operation completes. Possible
        values are: Delete, Reprovision.
    target_image_name:
        Specifies the image name of the captured virtual machine.
    target_image_label:
        Specifies the friendly name of the captured virtual machine.
    provisioning_configuration:
        Use an instance of WindowsConfigurationSet or LinuxConfigurationSet.
    '''
    _validate_not_none('service_name', service_name)
    _validate_not_none('deployment_name', deployment_name)
    _validate_not_none('role_name', role_name)
    _validate_not_none('post_capture_action', post_capture_action)
    _validate_not_none('target_image_name', target_image_name)
    _validate_not_none('target_image_label', target_image_label)
    return self._perform_post(
        self._get_role_instance_operations_path(
            service_name, deployment_name, role_name),
        _XmlSerializer.capture_role_to_xml(
            post_capture_action,
            target_image_name,
            target_image_label,
            provisioning_configuration),
        as_async=True)  # 'async' became a reserved word in Python 3.7; the legacy SDK renamed the parameter
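# A hedged usage sketch of the call above (legacy Service Management API;
# 'sms' and all resource names are illustrative assumptions):
sms = ServiceManagementService(subscription_id, certificate_path)
result = sms.capture_role(
    'myservice', 'mydeployment', 'myrole',
    post_capture_action='Delete',
    target_image_name='captured-image',
    target_image_label='Captured Image')
sms.wait_for_operation_status(result.request_id)  # block until the async operation finishes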
    if_unmodified_since:
        Optional. A DateTime value. Specify this conditional header to
        write the page only if the blob has not been modified since the
        specified date/time. If the blob has been modified, the Blob
        service fails.
    if_match:
        Optional. An ETag value. Specify an ETag value for this conditional
        header to write the page only if the blob's ETag value matches the
        value specified. If the values do not match, the Blob service fails.
    if_none_match:
        Optional. An ETag value. Specify an ETag value for this conditional
        header to write the page only if the blob's ETag value does not
        match the value specified. If the values are identical, the Blob
        service fails.
    '''
    _validate_not_none('container_name', container_name)
    _validate_not_none('blob_name', blob_name)
    _validate_not_none('page', page)
    _validate_not_none('x_ms_range', x_ms_range)
    _validate_not_none('x_ms_page_write', x_ms_page_write)
    request = HTTPRequest()
    request.method = 'PUT'
    request.host = self._get_host()
    request.path = '/' + \
        _str(container_name) + '/' + _str(blob_name) + '?comp=page'
    request.headers = [
        ('x-ms-range', _str_or_none(x_ms_range)),
        ('Content-MD5', _str_or_none(content_md5)),
        ('x-ms-page-write', _str_or_none(x_ms_page_write)),
        ('x-ms-lease-id', _str_or_none(x_ms_lease_id)),
        ('x-ms-if-sequence-number-le',
         _str_or_none(x_ms_if_sequence_number_lte)),
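# A hedged usage sketch of the conditional headers documented above (legacy
# BlobService API; assumes get_blob_properties returns the response headers,
# including 'etag'). The if_match check makes the page write an
# optimistic-concurrency update:
props = blob_service.get_blob_properties('mycontainer', 'mypageblob')
blob_service.put_page(
    'mycontainer', 'mypageblob', page_bytes,
    x_ms_range='bytes=0-511', x_ms_page_write='update',
    if_match=props['etag'])  # the write fails if another writer changed the blob first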