Python Boto3 Read Timeout on Endpoint URL: "None"
Python botocore.client.Config() Examples
The following are 30 code examples showing how to use botocore.client.Config(), extracted from open source projects.
You may also want to check out the other available functions and classes of the botocore.client module.
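Before diving into the examples, here is a minimal sketch of the pattern most of them share: build a botocore.client.Config with the transport settings you need and pass it to boto3.client(). The service name, region, and timeout values below are illustrative placeholders, not taken from any particular example.

import boto3
from botocore.client import Config

# A Config object collects transport-level settings that boto3 itself
# does not expose as client() keyword arguments.
config = Config(
    region_name='us-east-1',      # placeholder region
    connect_timeout=5,            # seconds to wait for a TCP connection
    read_timeout=60,              # seconds to wait for a response; exceeding it
                                  # raises ReadTimeoutError ("Read timeout on endpoint URL")
    retries={'max_attempts': 3},  # botocore-level retry budget
    signature_version='s3v4',     # sign requests with SigV4
)

s3 = boto3.client('s3', config=config)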
Example 1
def get_s3_client(unsigned=True):
    """Return a boto3 S3 client with optional unsigned config.

    Parameters
    ----------
    unsigned : Optional[bool]
        If True, the client will be using unsigned mode in which public
        resources can be accessed without credentials. Default: True

    Returns
    -------
    botocore.client.S3
        A client object to AWS S3.
    """
    if unsigned:
        return boto3.client('s3', config=Config(signature_version=UNSIGNED))
    else:
        return boto3.client('s3')
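A possible usage of the helper above; the bucket and key names are hypothetical, and anonymous access only works for objects that allow public reads:

# Hypothetical usage: fetch a publicly readable object without credentials.
client = get_s3_client(unsigned=True)
client.download_file('some-public-bucket', 'data/example.csv', '/tmp/example.csv')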
Example 2
def proxy_response(req):
    s3 = boto3.resource('s3')
    s3_client = boto3.client('s3', config=Config(signature_version='s3v4'))
    bucket = s3.Bucket(BUCKET_NAME)
    file_name = str(uuid4())
    obj = bucket.put_object(
        Key=file_name,
        Body=req.content,
        ACL="authenticated-read",
        ContentType=req.headers["content-type"]
    )
    url = s3_client.generate_presigned_url(
        "get_object",
        Params={
            "Bucket": BUCKET_NAME,
            "Key": file_name},
        ExpiresIn=120
    )
    return redirect(url, 303)
Example 3
def _save_chunk(self, data, chunk_info, executor=None):
    # Key name
    key_name = f"{self.strax_unique_key}/{chunk_info['chunk_i']:06d}"

    # Save chunk via temporary file
    with tempfile.SpooledTemporaryFile() as f:
        filesize = strax.save_file(f, data=data, compressor=self.md['compressor'])
        f.seek(0)
        self.s3.upload_fileobj(f, BUCKET_NAME, key_name, Config=self.config)

    return dict(key_name=key_name, filesize=filesize), None
Example 4
def s3_cleanup(bucket, cluster_name, user_name):
    s3_res = boto3.resource('s3', config=Config(signature_version='s3v4'))
    client = boto3.client('s3', config=Config(signature_version='s3v4'),
                          region_name=os.environ['aws_region'])
    try:
        client.head_bucket(Bucket=bucket)
    except:
        print("There is no bucket {} or you do not have permission to access it".format(bucket))
        sys.exit(0)
    try:
        resource = s3_res.Bucket(bucket)
        prefix = user_name + '/' + cluster_name + "/"
        for i in resource.objects.filter(Prefix=prefix):
            s3_res.Object(resource.name, i.key).delete()
    except Exception as err:
        logging.info("Unable to clean S3 bucket: " + str(err) + "\n Traceback: " +
                     traceback.print_exc(file=sys.stdout))
        append_result(str({"error": "Unable to clean S3 bucket",
                           "error_message": str(err) + "\n Traceback: " +
                           traceback.print_exc(file=sys.stdout)}))
        traceback.print_exc(file=sys.stdout)
Example 5
def get_object_count(bucket, prefix):
    try:
        s3_cli = boto3.client('s3', config=Config(signature_version='s3v4'),
                              region_name=args.region)
        content = s3_cli.get_paginator('list_objects')
        file_list = []
        try:
            for i in content.paginate(Bucket=bucket, Delimiter='/', Prefix=prefix):
                for file in i.get('Contents'):
                    file_list.append(file.get('Key'))
            count = len(file_list)
        except:
            print("{} does not exist yet. Waiting...".format(prefix))
            count = 0
        return count
    except Exception as err:
        logging.error("Unable to get objects from s3: " + str(err) + "\n Traceback: " +
                      traceback.print_exc(file=sys.stdout))
Example 6
def download_file_from_s3(bucket_name: str, key: str, local_path: str) -> None:
    """
    Downloads file from S3 anonymously

    :param bucket_name: S3 Bucket name
    :param key: S3 File key name
    :param local_path: Local file path to download as
    """
    verify_ssl = get_verify_ssl()
    if not os.path.isfile(local_path):
        client = boto3.client(
            "s3", config=Config(signature_version=UNSIGNED), verify=verify_ssl
        )
        try:
            logger.info("Downloading S3 data file...")
            total = client.head_object(Bucket=bucket_name, Key=key)["ContentLength"]
            with ProgressPercentage(client, bucket_name, key, total) as Callback:
                client.download_file(bucket_name, key, local_path, Callback=Callback)
        except ClientError:
            raise KeyError(f"File {key} not available in {bucket_name} bucket.")
    else:
        logger.info(f"Reusing cached file {local_path}...")
Example 7
def assume_role(cls, role_arn, principal_arn, saml_response, duration=3600):
    '''
    Assumes the desired role using the saml_response given. The response
    should be b64 encoded. Duration is in seconds.

    :param role_arn: role amazon resource name
    :param principal_arn: principal name
    :param saml_response: SAML object to assume role with
    :param duration: session duration (default: 3600)
    :return: AWS session token
    '''
    # Assume role with new SAML
    conn = boto3.client('sts', config=client.Config(signature_version=botocore.UNSIGNED,
                                                    user_agent=cls.USER_AGENT,
                                                    region_name=None))
    aws_session_token = conn.assume_role_with_saml(
        RoleArn=role_arn,
        PrincipalArn=principal_arn,
        SAMLAssertion=saml_response,
        DurationSeconds=duration,
    )
    return aws_session_token
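A sketch of how this classmethod might be called; the ARNs are placeholders, saml_assertion stands in for a base64-encoded SAML response from your IdP, and SamlClient stands in for whatever class defines USER_AGENT:

token = SamlClient.assume_role(
    role_arn='arn:aws:iam::123456789012:role/example-role',           # placeholder
    principal_arn='arn:aws:iam::123456789012:saml-provider/example',  # placeholder
    saml_response=saml_assertion,
)
creds = token['Credentials']  # temporary AccessKeyId / SecretAccessKey / SessionToken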
Example 8
def _make_boto3_athena_client(container):
    region = container.get_parameter('aws_region')
    logger = container.get('logger')
    config = botocore_client.Config(
        connect_timeout=5,
        read_timeout=5,
        region_name=region
    )
    session_kwargs = {}
    try:
        session = boto3.Session(**session_kwargs)
        return session.client(
            'athena',
            config=config,
        )
    except ProfileNotFound:
        logger.error('AWS Athena Connection via Profile Failed')
Example 9
def _make_boto3_kinesis_client(container):
    region = container.get_parameter('aws_region')
    logger = container.get('logger')
    config = botocore_client.Config(
        connect_timeout=5,
        read_timeout=5,
        region_name=region
    )
    session_kwargs = {}
    try:
        session = boto3.Session(**session_kwargs)
        return session.client('kinesis', config=config)
    except ProfileNotFound:
        logger.error('AWS Kinesis Connection via Profile Failed')
Example 10
def upload_export_tarball(self, realm: Optional[Realm], tarball_path: str) -> str:
    def percent_callback(bytes_transferred: Any) -> None:
        sys.stdout.write('.')
        sys.stdout.flush()

    # We use the avatar bucket, because it's world-readable.
    key = self.avatar_bucket.Object(os.path.join("exports", generate_random_token(32),
                                                 os.path.basename(tarball_path)))
    key.upload_file(tarball_path, Callback=percent_callback)

    session = botocore.session.get_session()
    config = Config(signature_version=botocore.UNSIGNED)

    public_url = session.create_client('s3', config=config).generate_presigned_url(
        'get_object',
        Params={
            'Bucket': self.avatar_bucket.name,
            'Key': key.key,
        },
        ExpiresIn=0,
    )
    return public_url
Example 11
def test_only_dynamodb_calls_are_traced():
    """Test only a single subsegment is created for other AWS services.

    As the pynamodb patch applies the botocore patch as well, we need to
    ensure that only one subsegment is created for all calls not made by
    PynamoDB. As PynamoDB calls botocore differently than the botocore patch
    expects, we also only get a single subsegment per PynamoDB call.
    """
    session = botocore.session.get_session()
    s3 = session.create_client('s3', region_name='us-west-2',
                               config=Config(signature_version=UNSIGNED))
    try:
        s3.get_bucket_location(Bucket='mybucket')
    except ClientError:
        pass

    subsegments = xray_recorder.current_segment().subsegments
    assert len(subsegments) == 1
    assert subsegments[0].name == 's3'
    assert len(subsegments[0].subsegments) == 0
Example 12
def __init__(self, handle_task=lambda t, i: None, **kwargs):
    """Will not be called if used as a mixin. Provides just the expected variables.

    Args:
        handle_task (callable) : Callable to process task input and send
                                 success or failure
        kwargs : Arguments for heaviside.utils.create_session
    """
    session, _ = create_session(**kwargs)
    # DP NOTE: read_timeout is needed so that the long poll for tasking doesn't
    #          timeout client side before AWS returns that there is no work
    self.client = session.client('stepfunctions', config=Config(read_timeout=70))
    self.log = logging.getLogger(__name__)
    self.name = None
    self.arn = None
    self.handle_task = handle_task
    self.max_concurrent = 0
    self.poll_delay = 1
    self.polling = False
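The DP NOTE above is worth dwelling on: the Step Functions get_activity_task call long-polls for up to 60 seconds before returning an empty response, so the client's read_timeout must exceed 60 or botocore raises a ReadTimeoutError ("Read timeout on endpoint URL") mid-poll. A minimal sketch of that polling pattern, with a placeholder activity ARN:

import boto3
from botocore.client import Config

# read_timeout must exceed the 60-second long poll of get_activity_task
sfn = boto3.client('stepfunctions', config=Config(read_timeout=70))

resp = sfn.get_activity_task(
    activityArn='arn:aws:states:us-east-1:123456789012:activity:example',  # placeholder
    workerName='example-worker',
)
if resp.get('taskToken'):
    # A task was returned; do the work, then report success with JSON output.
    sfn.send_task_success(taskToken=resp['taskToken'], output='{}')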
Example 13
def __init__(self, name, target=None, **kwargs):
    """
    Args:
        name (string): Name of the activity to monitor
                       The activity's ARN is looked up in AWS using the
                       provided AWS credentials
        target (string|callable): Function to pass to TaskProcess as the target,
                                  If string, the class / function will be imported
        kwargs (dict): Same arguments as utils.create_session()
    """
    super(ActivityProcess, self).__init__(name=name)
    self.name = name
    self.credentials = kwargs
    self.session, self.account_id = create_session(**kwargs)
    self.client = self.session.client('stepfunctions', config=Config(read_timeout=70))
    self.log = logging.getLogger(__name__)
    self.max_concurrent = 0
    self.poll_delay = 1

    if isinstance(target, str):
        target = TaskProcess.resolve_function(target)
    self.target = target
Example 14
def register_domain(domain=None, region=None):
    client = boto3.client(
        'swf',
        region_name=region or config.REGION,
        config=Config(connect_timeout=config.CONNECT_TIMEOUT,
                      read_timeout=config.READ_TIMEOUT))

    # register domain for Mass
    try:
        res = client.register_domain(
            name=domain or config.DOMAIN,
            description='The SWF domain for Mass',
            workflowExecutionRetentionPeriodInDays=str(
                int(math.ceil(float(config.WORKFLOW_EXECUTION_START_TO_CLOSE_TIMEOUT) / 60 / 60 / 24)))
        )
    except ClientError:
        # DomainAlreadyExists
        pass
Example 15
def register_activity_type(domain=None, region=None):
    client = boto3.client(
        'swf',
        region_name=region or config.REGION,
        config=Config(connect_timeout=config.CONNECT_TIMEOUT,
                      read_timeout=config.READ_TIMEOUT))

    # register activity type for Cmd
    try:
        res = client.register_activity_type(
            domain=domain or config.DOMAIN,
            name=config.ACTIVITY_TYPE_FOR_ACTION['name'],
            version=config.ACTIVITY_TYPE_FOR_ACTION['version'],
            description='The SWF activity type for Cmd of Mass.',
            defaultTaskStartToCloseTimeout=str(config.ACTIVITY_TASK_START_TO_CLOSE_TIMEOUT),
            defaultTaskHeartbeatTimeout=str(config.ACTIVITY_HEARTBEAT_TIMEOUT),
            defaultTaskList={'name': config.ACTIVITY_TASK_LIST},
            defaultTaskPriority='1',
            defaultTaskScheduleToStartTimeout=str(config.ACTIVITY_TASK_START_TO_CLOSE_TIMEOUT),
            defaultTaskScheduleToCloseTimeout=str(config.ACTIVITY_TASK_START_TO_CLOSE_TIMEOUT)
        )
    except ClientError:
        # TypeAlreadyExists
        pass
Example 16
def iter_workflow_execution_history(workflow_id, run_id, reverse_order=False,
                                    ignore_decision_task=True):
    client = boto3.client(
        'swf',
        region_name=config.REGION,
        config=Config(connect_timeout=config.CONNECT_TIMEOUT,
                      read_timeout=config.READ_TIMEOUT))
    paginator = client.get_paginator('get_workflow_execution_history')
    for res in paginator.paginate(
            domain=config.DOMAIN,
            execution={
                'workflowId': workflow_id,
                'runId': run_id
            },
            reverseOrder=reverse_order):
        for event in res['events']:
            if ignore_decision_task and event['eventType'].startswith('DecisionTask'):
                continue
            yield event
Example 17
def main():
    config = Config(connect_timeout=60, read_timeout=60)
    session = boto3.Session(profile_name=None if len(sys.argv) < 2 else sys.argv[1])
    amis = {}
    f = open("ami.yaml", "w")
    for region in get_regions(session.client('ec2', region_name='us-east-1', config=config)):
        amis[region] = {"AMI": bastion_ami(session, config, region)}
    yaml = YAML()
    yaml.default_flow_style = False
    # Print AMI list, in yaml format, to terminal
    yaml.dump(amis, sys.stdout)
    # Dump AMI list in yaml format to a file
    yaml.dump(amis, f)
Example 18
def __init__(self, **settings):
    super().__init__(**settings)

    check_location(self)

    # Backward-compatibility: given the anteriority of the SECURE_URL setting
    # we fall back to https if specified in order to avoid the construction
    # of unsecure urls.
    if self.secure_urls:
        self.url_protocol = 'https:'

    self._bucket = None
    self._connections = threading.local()

    self.access_key, self.secret_key = self._get_access_keys()
    self.security_token = self._get_security_token()

    if not self.config:
        self.config = Config(
            s3={'addressing_style': self.addressing_style},
            signature_version=self.signature_version,
            proxies=self.proxies,
        )
Example 19
def create_client(stage_info, use_accelerate_endpoint=False):
    """Creates a client object with a stage credential.

    Args:
        stage_info: Information about the stage.
        use_accelerate_endpoint: Whether or not to use accelerated endpoint
            (Default value = False).

    Returns:
        The client to communicate with S3.
    """
    logger = getLogger(__name__)
    stage_credentials = stage_info['creds']
    security_token = stage_credentials.get('AWS_TOKEN', None)
    end_point = stage_info['endPoint']
    logger.debug("AWS_KEY_ID: %s", stage_credentials['AWS_KEY_ID'])

    # if GS sends us an endpoint, it's likely for FIPS. Use it.
    end_point = ('https://' + stage_info['endPoint']) if stage_info['endPoint'] else None

    config = Config(
        signature_version='s3v4',
        s3={
            'use_accelerate_endpoint': use_accelerate_endpoint,
            'addressing_style': ADDRESSING_STYLE
        })
    client = boto3.resource(
        's3',
        region_name=stage_info['region'],
        aws_access_key_id=stage_credentials['AWS_KEY_ID'],
        aws_secret_access_key=stage_credentials['AWS_SECRET_KEY'],
        aws_session_token=security_token,
        endpoint_url=end_point,
        config=config,
    )
    return client
Example 20
def _native_download_file(meta, full_dst_file_name, max_concurrency):
    logger = getLogger(__name__)
    try:
        akey = SnowflakeS3Util._get_s3_object(meta, meta['src_file_name'])
        akey.download_file(
            full_dst_file_name,
            Callback=meta['get_callback'](
                meta['src_file_name'],
                meta['src_file_size'],
                output_stream=meta['get_callback_output_stream'],
                show_progress_bar=meta['show_progress_bar'])
            if meta['get_callback'] else None,
            Config=TransferConfig(
                multipart_threshold=SnowflakeS3Util.DATA_SIZE_THRESHOLD,
                max_concurrency=max_concurrency,
                num_download_attempts=10,
            )
        )
        meta['result_status'] = ResultStatus.DOWNLOADED
    except botocore.exceptions.ClientError as err:
        if err.response['Error']['Code'] == EXPIRED_TOKEN:
            meta['result_status'] = ResultStatus.RENEW_TOKEN
        else:
            logger.debug(
                "Failed to download a file: %s, err: %s",
                full_dst_file_name, err, exc_info=True)
            raise err
    except RetriesExceededError as err:
        meta['result_status'] = ResultStatus.NEED_RETRY
        meta['last_error'] = err
    except OpenSSL.SSL.SysCallError as err:
        meta['last_error'] = err
        if err.args[0] == ERRORNO_WSAECONNABORTED:
            # connection was disconnected by S3
            # because of too many connections. retry with
            # less concurrency to mitigate it
            meta['result_status'] = ResultStatus.NEED_RETRY_WITH_LOWER_CONCURRENCY
        else:
            meta['result_status'] = ResultStatus.NEED_RETRY
Example 21
def boto3_agent_from_sts(agent_service, agent_type, region, credentials={}):
    session = boto3.session.Session()

    # Generate our kwargs to pass
    kw_args = {
        "region_name": region,
        "config": Config(signature_version='s3v4')
    }

    if credentials:
        kw_args["aws_access_key_id"] = credentials['accessKeyId']
        kw_args["aws_secret_access_key"] = credentials['secretAccessKey']
        kw_args["aws_session_token"] = credentials['sessionToken']

    # Build our agent depending on how we're called.
    if agent_type == "client":
        return(session.client(
            agent_service,
            **kw_args
        ))
    if agent_type == "resource":
        return(session.resource(
            agent_service,
            **kw_args
        ))
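A possible call of the helper above with temporary STS credentials; the dict keys match what the function reads, and all values are fake placeholders:

creds = {
    'accessKeyId': 'ASIAEXAMPLE',             # placeholder
    'secretAccessKey': 'example-secret-key',  # placeholder
    'sessionToken': 'example-session-token',  # placeholder
}
s3 = boto3_agent_from_sts('s3', 'client', 'us-east-1', credentials=creds)
print(s3.list_buckets()['Buckets'])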
Example 22
def put_to_bucket(bucket_name, local_file, destination_file):
    try:
        s3 = boto3.client('s3', config=Config(signature_version='s3v4'),
                          region_name=os.environ['aws_region'])
        with open(local_file, 'rb') as data:
            s3.upload_fileobj(data, bucket_name, destination_file,
                              ExtraArgs={'ServerSideEncryption': 'AES256'})
        return True
    except Exception as err:
        logging.info("Unable to upload files to S3 bucket: " + str(err) + "\n Traceback: " +
                     traceback.print_exc(file=sys.stdout))
        append_result(str({"error": "Unable to upload files to S3 bucket",
                           "error_message": str(err) + "\n Traceback: " +
                           traceback.print_exc(file=sys.stdout)}))
        traceback.print_exc(file=sys.stdout)
        return False
Example 23
def create_s3_bucket(bucket_name, bucket_tags, region, bucket_name_tag):
    try:
        s3 = boto3.resource('s3', config=Config(signature_version='s3v4'))
        if region == "us-east-1":
            bucket = s3.create_bucket(Bucket=bucket_name)
        else:
            bucket = s3.create_bucket(Bucket=bucket_name,
                                      CreateBucketConfiguration={'LocationConstraint': region})
        boto3.client('s3', config=Config(signature_version='s3v4')).put_bucket_encryption(
            Bucket=bucket_name, ServerSideEncryptionConfiguration={
                'Rules': [
                    {
                        'ApplyServerSideEncryptionByDefault': {
                            'SSEAlgorithm': 'AES256'
                        }
                    },
                ]
            })
        tags = list()
        tags.append({'Key': os.environ['conf_tag_resource_id'],
                     'Value': os.environ['conf_service_base_name'] + ':' + bucket_name_tag})
        for tag in bucket_tags.split(','):
            tags.append(
                {
                    'Key': tag.split(':')[0],
                    'Value': tag.split(':')[1]
                }
            )
        tagging = bucket.Tagging()
        tagging.put(Tagging={'TagSet': tags})
        tagging.reload()
        return bucket.name
    except Exception as err:
        logging.info("Unable to create S3 bucket: " + str(err) + "\n Traceback: " +
                     traceback.print_exc(file=sys.stdout))
        append_result(str({"error": "Unable to create S3 bucket",
                           "error_message": str(err) + "\n Traceback: " +
                           traceback.print_exc(file=sys.stdout)}))
        traceback.print_exc(file=sys.stdout)
Example 24
def remove_s3(bucket_type='all', scientist=''):
    try:
        client = boto3.client('s3', config=Config(signature_version='s3v4'),
                              region_name=os.environ['aws_region'])
        s3 = boto3.resource('s3')
        bucket_list = []
        if bucket_type == 'ssn':
            bucket_name = (os.environ['conf_service_base_name'] + '-ssn-bucket').lower().replace('_', '-')
            bucket_list.append(('{0}-{1}-shared-bucket'.format(
                os.environ['conf_service_base_name'],
                os.environ['default_endpoint_name'])).lower().replace('_', '-'))
        elif bucket_type == 'edge':
            bucket_name = (os.environ['conf_service_base_name'] + '-' + "{}".format(scientist) + '-' +
                           os.environ['endpoint_name'] + '-bucket').lower().replace('_', '-')
        else:
            bucket_name = (os.environ['conf_service_base_name']).lower().replace('_', '-')
        for item in client.list_buckets().get('Buckets'):
            if bucket_name in item.get('Name'):
                for i in client.get_bucket_tagging(Bucket=item.get('Name')).get('TagSet'):
                    if i.get('Key') == os.environ['conf_service_base_name'].lower() + '-tag':
                        bucket_list.append(item.get('Name'))
        for s3bucket in bucket_list:
            if s3bucket:
                bucket = s3.Bucket(s3bucket)
                bucket.objects.all().delete()
                print("The S3 bucket {} has been cleaned".format(s3bucket))
                client.delete_bucket(Bucket=s3bucket)
                print("The S3 bucket {} has been deleted successfully".format(s3bucket))
            else:
                print("There are no buckets to delete")
    except Exception as err:
        logging.info("Unable to remove S3 bucket: " + str(err) + "\n Traceback: " +
                     traceback.print_exc(file=sys.stdout))
        append_result(str({"error": "Unable to remove S3 bucket",
                           "error_message": str(err) + "\n Traceback: " +
                           traceback.print_exc(file=sys.stdout)}))
        traceback.print_exc(file=sys.stdout)
Example 25
def install_emr_spark(args):
    s3_client = boto3.client('s3', config=Config(signature_version='s3v4'),
                             region_name=args.region)
    s3_client.download_file(args.bucket, args.project_name + '/' + args.cluster_name +
                            '/spark.tar.gz', '/tmp/spark.tar.gz')
    s3_client.download_file(args.bucket, args.project_name + '/' + args.cluster_name +
                            '/spark-checksum.chk', '/tmp/spark-checksum.chk')
    if 'WARNING' in local('md5sum -c /tmp/spark-checksum.chk', capture=True):
        local('rm -f /tmp/spark.tar.gz')
        s3_client.download_file(args.bucket, args.project_name + '/' + args.cluster_name +
                                '/spark.tar.gz', '/tmp/spark.tar.gz')
        if 'WARNING' in local('md5sum -c /tmp/spark-checksum.chk', capture=True):
            print("The checksum of spark.tar.gz is mismatched. It could be caused by an AWS network issue.")
            sys.exit(1)
    local('sudo tar -zhxvf /tmp/spark.tar.gz -C /opt/' + args.emr_version + '/' + args.cluster_name + '/')
Example 26
def yarn(args, yarn_dir):
    print("Downloading yarn configuration...")
    if args.region == 'cn-north-1':
        s3client = boto3.client('s3', config=Config(signature_version='s3v4'),
                                endpoint_url='https://s3.cn-north-1.amazonaws.com.cn',
                                region_name=args.region)
        s3resource = boto3.resource('s3', config=Config(signature_version='s3v4'),
                                    endpoint_url='https://s3.cn-north-1.amazonaws.com.cn',
                                    region_name=args.region)
    else:
        s3client = boto3.client('s3', config=Config(signature_version='s3v4'),
                                region_name=args.region)
        s3resource = boto3.resource('s3', config=Config(signature_version='s3v4'))
    get_files(s3client, s3resource, args.project_name + '/' + args.cluster_name + '/config/',
              args.bucket, yarn_dir)
    local('sudo mv ' + yarn_dir + args.project_name + '/' + args.cluster_name + '/config/* ' + yarn_dir)
    local('sudo rm -rf ' + yarn_dir + args.project_name + '/')
Example 27
def get_cluster_python_version(region, bucket, user_name, cluster_name):
    s3_client = boto3.client('s3', config=Config(signature_version='s3v4'), region_name=region)
    s3_client.download_file(bucket, user_name + '/' + cluster_name + '/python_version',
                            '/tmp/python_version')
Example 28
def get_bucket_by_name(bucket_name):
    try:
        s3 = boto3.resource('s3', config=Config(signature_version='s3v4'))
        for bucket in s3.buckets.all():
            if bucket.name == bucket_name:
                return bucket.name
        return ''
    except Exception as err:
        logging.error("Error with getting bucket by name: " + str(err) + "\n Traceback: " +
                      traceback.print_exc(file=sys.stdout))
        append_result(str({"error": "Error with getting bucket by name",
                           "error_message": str(err) + "\n Traceback: " +
                           traceback.print_exc(file=sys.stdout)}))
        traceback.print_exc(file=sys.stdout)
Example 29
def upload_jars_parser(args):
    try:
        s3 = boto3.resource('s3', config=Config(signature_version='s3v4'))
        s3.meta.client.upload_file('/root/scripts/dataengine-service_jars_parser.py',
                                   args.s3_bucket, 'jars_parser.py',
                                   ExtraArgs={'ServerSideEncryption': 'AES256'})
    except Exception as err:
        logging.error("Unable to upload jars to s3: " + str(err) + "\n Traceback: " +
                      traceback.print_exc(file=sys.stdout))
Example 30
def remove_user_key(args):
    try:
        client = boto3.client('s3', config=Config(signature_version='s3v4'),
                              region_name=args.region)
        client.delete_object(Bucket=args.s3_bucket, Key=args.project_name + '.pub')
    except Exception as err:
        logging.error("Unable to remove user key: " + str(err) + "\n Traceback: " +
                      traceback.print_exc(file=sys.stdout))
Source: https://www.programcreek.com/python/example/104338/botocore.client.Config