Dataset columns (type and observed range):

body: string, 26 to 98.2k characters
body_hash: int64, -9,222,864,604,528,158,000 to 9,221,803,474B
docstring: string, 1 to 16.8k characters
path: string, 5 to 230 characters
name: string, 1 to 96 characters
repository_name: string, 7 to 89 characters
lang: string, 1 distinct value
body_without_docstring: string, 20 to 98.2k characters
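Each record below pairs a Python function body with its extracted docstring and repository metadata. As a minimal sketch of how a table with this schema could be loaded and inspected (assuming it is published as a Hugging Face dataset; the identifier "user/python-docstring-corpus" is a hypothetical placeholder, not the actual name of this dataset):

```python
# Minimal sketch: load a dataset with the schema above and inspect one record.
# "user/python-docstring-corpus" is a hypothetical placeholder identifier.
from datasets import load_dataset

ds = load_dataset("user/python-docstring-corpus", split="train")

record = ds[0]
print(record["repository_name"], record["path"], record["name"])
print(record["docstring"])               # natural-language summary of the function
print(record["body_without_docstring"])  # function body with the docstring stripped
```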
def _canonicalize_experiment(exp): 'Sorts the repeated fields of an Experiment message.' exp.hparam_infos.sort(key=operator.attrgetter('name')) exp.metric_infos.sort(key=operator.attrgetter('name.group', 'name.tag')) for hparam_info in exp.hparam_infos: if hparam_info.HasField('domain_discrete'): hparam_info.domain_discrete.values.sort(key=operator.attrgetter('string_value'))
-8,215,901,732,217,587,000
Sorts the repeated fields of an Experiment message.
tensorboard/plugins/hparams/backend_context_test.py
_canonicalize_experiment
aryaman4/tensorboard
python
def _canonicalize_experiment(exp): exp.hparam_infos.sort(key=operator.attrgetter('name')) exp.metric_infos.sort(key=operator.attrgetter('name.group', 'name.tag')) for hparam_info in exp.hparam_infos: if hparam_info.HasField('domain_discrete'): hparam_info.domain_discrete.values.sort(key=operator.attrgetter('string_value'))
def __init__(self, request_id=None, return_code=None, return_message=None, total_rows=None, process_list=None): 'ResumeProcessesResponse - a model defined in Swagger' self._request_id = None self._return_code = None self._return_message = None self._total_rows = None self._process_list = None self.discriminator = None if (request_id is not None): self.request_id = request_id if (return_code is not None): self.return_code = return_code if (return_message is not None): self.return_message = return_message if (total_rows is not None): self.total_rows = total_rows if (process_list is not None): self.process_list = process_list
2,894,216,753,114,153,500
ResumeProcessesResponse - a model defined in Swagger
lib/services/vautoscaling/ncloud_vautoscaling/model/resume_processes_response.py
__init__
NaverCloudPlatform/ncloud-sdk-python
python
def __init__(self, request_id=None, return_code=None, return_message=None, total_rows=None, process_list=None): self._request_id = None self._return_code = None self._return_message = None self._total_rows = None self._process_list = None self.discriminator = None if (request_id is not None): self.request_id = request_id if (return_code is not None): self.return_code = return_code if (return_message is not None): self.return_message = return_message if (total_rows is not None): self.total_rows = total_rows if (process_list is not None): self.process_list = process_list
@property def request_id(self): 'Gets the request_id of this ResumeProcessesResponse. # noqa: E501\n\n\n :return: The request_id of this ResumeProcessesResponse. # noqa: E501\n :rtype: str\n ' return self._request_id
-1,183,890,875,359,341,300
Gets the request_id of this ResumeProcessesResponse. # noqa: E501 :return: The request_id of this ResumeProcessesResponse. # noqa: E501 :rtype: str
lib/services/vautoscaling/ncloud_vautoscaling/model/resume_processes_response.py
request_id
NaverCloudPlatform/ncloud-sdk-python
python
@property def request_id(self): 'Gets the request_id of this ResumeProcessesResponse. # noqa: E501\n\n\n :return: The request_id of this ResumeProcessesResponse. # noqa: E501\n :rtype: str\n ' return self._request_id
@request_id.setter def request_id(self, request_id): 'Sets the request_id of this ResumeProcessesResponse.\n\n\n :param request_id: The request_id of this ResumeProcessesResponse. # noqa: E501\n :type: str\n ' self._request_id = request_id
-8,333,445,982,014,422,000
Sets the request_id of this ResumeProcessesResponse. :param request_id: The request_id of this ResumeProcessesResponse. # noqa: E501 :type: str
lib/services/vautoscaling/ncloud_vautoscaling/model/resume_processes_response.py
request_id
NaverCloudPlatform/ncloud-sdk-python
python
@request_id.setter def request_id(self, request_id): 'Sets the request_id of this ResumeProcessesResponse.\n\n\n :param request_id: The request_id of this ResumeProcessesResponse. # noqa: E501\n :type: str\n ' self._request_id = request_id
@property def return_code(self): 'Gets the return_code of this ResumeProcessesResponse. # noqa: E501\n\n\n :return: The return_code of this ResumeProcessesResponse. # noqa: E501\n :rtype: str\n ' return self._return_code
5,002,841,652,600,358,000
Gets the return_code of this ResumeProcessesResponse. # noqa: E501 :return: The return_code of this ResumeProcessesResponse. # noqa: E501 :rtype: str
lib/services/vautoscaling/ncloud_vautoscaling/model/resume_processes_response.py
return_code
NaverCloudPlatform/ncloud-sdk-python
python
@property def return_code(self): 'Gets the return_code of this ResumeProcessesResponse. # noqa: E501\n\n\n :return: The return_code of this ResumeProcessesResponse. # noqa: E501\n :rtype: str\n ' return self._return_code
@return_code.setter def return_code(self, return_code): 'Sets the return_code of this ResumeProcessesResponse.\n\n\n :param return_code: The return_code of this ResumeProcessesResponse. # noqa: E501\n :type: str\n ' self._return_code = return_code
1,679,400,526,842,856,200
Sets the return_code of this ResumeProcessesResponse. :param return_code: The return_code of this ResumeProcessesResponse. # noqa: E501 :type: str
lib/services/vautoscaling/ncloud_vautoscaling/model/resume_processes_response.py
return_code
NaverCloudPlatform/ncloud-sdk-python
python
@return_code.setter def return_code(self, return_code): 'Sets the return_code of this ResumeProcessesResponse.\n\n\n :param return_code: The return_code of this ResumeProcessesResponse. # noqa: E501\n :type: str\n ' self._return_code = return_code
@property def return_message(self): 'Gets the return_message of this ResumeProcessesResponse. # noqa: E501\n\n\n :return: The return_message of this ResumeProcessesResponse. # noqa: E501\n :rtype: str\n ' return self._return_message
-5,566,010,396,584,428,000
Gets the return_message of this ResumeProcessesResponse. # noqa: E501 :return: The return_message of this ResumeProcessesResponse. # noqa: E501 :rtype: str
lib/services/vautoscaling/ncloud_vautoscaling/model/resume_processes_response.py
return_message
NaverCloudPlatform/ncloud-sdk-python
python
@property def return_message(self): 'Gets the return_message of this ResumeProcessesResponse. # noqa: E501\n\n\n :return: The return_message of this ResumeProcessesResponse. # noqa: E501\n :rtype: str\n ' return self._return_message
@return_message.setter def return_message(self, return_message): 'Sets the return_message of this ResumeProcessesResponse.\n\n\n :param return_message: The return_message of this ResumeProcessesResponse. # noqa: E501\n :type: str\n ' self._return_message = return_message
8,625,067,697,036,527,000
Sets the return_message of this ResumeProcessesResponse. :param return_message: The return_message of this ResumeProcessesResponse. # noqa: E501 :type: str
lib/services/vautoscaling/ncloud_vautoscaling/model/resume_processes_response.py
return_message
NaverCloudPlatform/ncloud-sdk-python
python
@return_message.setter def return_message(self, return_message): 'Sets the return_message of this ResumeProcessesResponse.\n\n\n :param return_message: The return_message of this ResumeProcessesResponse. # noqa: E501\n :type: str\n ' self._return_message = return_message
@property def total_rows(self): 'Gets the total_rows of this ResumeProcessesResponse. # noqa: E501\n\n\n :return: The total_rows of this ResumeProcessesResponse. # noqa: E501\n :rtype: int\n ' return self._total_rows
8,620,200,007,391,291,000
Gets the total_rows of this ResumeProcessesResponse. # noqa: E501 :return: The total_rows of this ResumeProcessesResponse. # noqa: E501 :rtype: int
lib/services/vautoscaling/ncloud_vautoscaling/model/resume_processes_response.py
total_rows
NaverCloudPlatform/ncloud-sdk-python
python
@property def total_rows(self): 'Gets the total_rows of this ResumeProcessesResponse. # noqa: E501\n\n\n :return: The total_rows of this ResumeProcessesResponse. # noqa: E501\n :rtype: int\n ' return self._total_rows
@total_rows.setter def total_rows(self, total_rows): 'Sets the total_rows of this ResumeProcessesResponse.\n\n\n :param total_rows: The total_rows of this ResumeProcessesResponse. # noqa: E501\n :type: int\n ' self._total_rows = total_rows
-3,135,320,641,953,777,000
Sets the total_rows of this ResumeProcessesResponse. :param total_rows: The total_rows of this ResumeProcessesResponse. # noqa: E501 :type: int
lib/services/vautoscaling/ncloud_vautoscaling/model/resume_processes_response.py
total_rows
NaverCloudPlatform/ncloud-sdk-python
python
@total_rows.setter def total_rows(self, total_rows): 'Sets the total_rows of this ResumeProcessesResponse.\n\n\n :param total_rows: The total_rows of this ResumeProcessesResponse. # noqa: E501\n :type: int\n ' self._total_rows = total_rows
@property def process_list(self): 'Gets the process_list of this ResumeProcessesResponse. # noqa: E501\n\n\n :return: The process_list of this ResumeProcessesResponse. # noqa: E501\n :rtype: list[Process]\n ' return self._process_list
3,722,111,833,422,468,600
Gets the process_list of this ResumeProcessesResponse. # noqa: E501 :return: The process_list of this ResumeProcessesResponse. # noqa: E501 :rtype: list[Process]
lib/services/vautoscaling/ncloud_vautoscaling/model/resume_processes_response.py
process_list
NaverCloudPlatform/ncloud-sdk-python
python
@property def process_list(self): 'Gets the process_list of this ResumeProcessesResponse. # noqa: E501\n\n\n :return: The process_list of this ResumeProcessesResponse. # noqa: E501\n :rtype: list[Process]\n ' return self._process_list
@process_list.setter def process_list(self, process_list): 'Sets the process_list of this ResumeProcessesResponse.\n\n\n :param process_list: The process_list of this ResumeProcessesResponse. # noqa: E501\n :type: list[Process]\n ' self._process_list = process_list
6,121,710,043,043,419,000
Sets the process_list of this ResumeProcessesResponse. :param process_list: The process_list of this ResumeProcessesResponse. # noqa: E501 :type: list[Process]
lib/services/vautoscaling/ncloud_vautoscaling/model/resume_processes_response.py
process_list
NaverCloudPlatform/ncloud-sdk-python
python
@process_list.setter def process_list(self, process_list): 'Sets the process_list of this ResumeProcessesResponse.\n\n\n :param process_list: The process_list of this ResumeProcessesResponse. # noqa: E501\n :type: list[Process]\n ' self._process_list = process_list
def to_dict(self): 'Returns the model properties as a dict' result = {} for (attr, _) in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) else: result[attr] = value return result
-2,772,352,302,133,010,000
Returns the model properties as a dict
lib/services/vautoscaling/ncloud_vautoscaling/model/resume_processes_response.py
to_dict
NaverCloudPlatform/ncloud-sdk-python
python
def to_dict(self): result = {} for (attr, _) in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) else: result[attr] = value return result
def to_str(self): 'Returns the string representation of the model' return pprint.pformat(self.to_dict())
5,849,158,643,760,736,000
Returns the string representation of the model
lib/services/vautoscaling/ncloud_vautoscaling/model/resume_processes_response.py
to_str
NaverCloudPlatform/ncloud-sdk-python
python
def to_str(self): return pprint.pformat(self.to_dict())
def __repr__(self): 'For `print` and `pprint`' return self.to_str()
-8,960,031,694,814,905,000
For `print` and `pprint`
lib/services/vautoscaling/ncloud_vautoscaling/model/resume_processes_response.py
__repr__
NaverCloudPlatform/ncloud-sdk-python
python
def __repr__(self): return self.to_str()
def __eq__(self, other): 'Returns true if both objects are equal' if (not isinstance(other, ResumeProcessesResponse)): return False return (self.__dict__ == other.__dict__)
-7,900,360,667,409,191,000
Returns true if both objects are equal
lib/services/vautoscaling/ncloud_vautoscaling/model/resume_processes_response.py
__eq__
NaverCloudPlatform/ncloud-sdk-python
python
def __eq__(self, other): if (not isinstance(other, ResumeProcessesResponse)): return False return (self.__dict__ == other.__dict__)
def __ne__(self, other): 'Returns true if both objects are not equal' return (not (self == other))
7,764,124,047,908,058,000
Returns true if both objects are not equal
lib/services/vautoscaling/ncloud_vautoscaling/model/resume_processes_response.py
__ne__
NaverCloudPlatform/ncloud-sdk-python
python
def __ne__(self, other): return (not (self == other))
def query_countries(countries: List[str]=[], country_ids: List[str]=[]) -> List[Region]: ' Returns a list of countries:\n If countries or country_ids are not empty, only those countries are returned (all of those in both lists)\n Otherwise, all countries are returned\n ' where = region_where_clause('s_country_label.text', countries, 'e_country.node1', country_ids) query = f''' SELECT e_country.node1 AS admin_id, s_country_label.text AS admin, 'Q6256' AS region_type, e_country.node1 AS country_id, s_country_label.text AS country, NULL as admin1_id, NULL as admin1, NULL as admin2_id, NULL as admin2, NULL as admin3_id, NULL as admin3 FROM edges e_country JOIN edges e_country_label JOIN strings s_country_label ON (e_country_label.id=s_country_label.edge_id) ON (e_country.node1=e_country_label.node1 AND e_country_label.label='label') WHERE e_country.label='P31' AND e_country.node2='Q6256' AND {where} ORDER BY country ''' return _query_regions(query)
-6,755,750,796,964,314,000
Returns a list of countries: If countries or country_ids are not empty, only those countries are returned (all of those in both lists) Otherwise, all countries are returned
db/sql/dal/regions.py
query_countries
Otamio/datamart-api
python
def query_countries(countries: List[str]=[], country_ids: List[str]=[]) -> List[Region]: ' Returns a list of countries:\n If countries or country_ids are not empty, only those countries are returned (all of those in both lists)\n Otherwise, all countries are returned\n ' where = region_where_clause('s_country_label.text', countries, 'e_country.node1', country_ids) query = f''' SELECT e_country.node1 AS admin_id, s_country_label.text AS admin, 'Q6256' AS region_type, e_country.node1 AS country_id, s_country_label.text AS country, NULL as admin1_id, NULL as admin1, NULL as admin2_id, NULL as admin2, NULL as admin3_id, NULL as admin3 FROM edges e_country JOIN edges e_country_label JOIN strings s_country_label ON (e_country_label.id=s_country_label.edge_id) ON (e_country.node1=e_country_label.node1 AND e_country_label.label='label') WHERE e_country.label='P31' AND e_country.node2='Q6256' AND {where} ORDER BY country ''' return _query_regions(query)
def query_admin1s(country: Optional[str]=None, country_id: Optional[str]=None, admin1s: List[str]=[], admin1_ids: List[str]=[]) -> List[Region]: '\n Returns a list of admin1s. If country or country_id is specified, return the admin1s only of that country.\n If admin1s or admin1_ids are provided, only those admins are returned.\n If all arguments are empty, all admin1s in the system are returned.\n ' if (country and country_id): raise ValueError('Only one of country, country_id may be specified') if country_id: country_where = f"e_country.node2='{country_id}'" elif country: country_where = f"LOWER(s_country_label.text)='{country.lower()}'" else: country_where = '1=1' admin1_where = region_where_clause('s_admin1_label.text', admin1s, 'e_admin1.node1', admin1_ids) query = f''' SELECT e_admin1.node1 AS admin_id, s_admin1_label.text AS admin, 'Q10864048' AS region_type, e_country.node2 AS country_id, s_country_label.text AS country, e_admin1.node1 as admin1_id, s_admin1_label.text as admin1, NULL as admin2_id, NULL as admin2, NULL as admin3_id, NULL as admin3 FROM edges e_admin1 JOIN edges e_admin1_label JOIN strings s_admin1_label ON (e_admin1_label.id=s_admin1_label.edge_id) ON (e_admin1.node1=e_admin1_label.node1 AND e_admin1_label.label='label') JOIN edges e_country ON (e_country.node1=e_admin1.node1 AND e_country.label='P17') JOIN edges e_country_label JOIN strings s_country_label ON (e_country_label.id=s_country_label.edge_id) ON (e_country.node2=e_country_label.node1 AND e_country_label.label='label') WHERE e_admin1.label='P31' AND e_admin1.node2='Q10864048' AND {country_where} AND {admin1_where} ORDER BY admin1 ''' return _query_regions(query)
5,190,282,231,298,423,000
Returns a list of admin1s. If country or country_id is specified, return the admin1s only of that country. If admin1s or admin1_ids are provided, only those admins are returned. If all arguments are empty, all admin1s in the system are returned.
db/sql/dal/regions.py
query_admin1s
Otamio/datamart-api
python
def query_admin1s(country: Optional[str]=None, country_id: Optional[str]=None, admin1s: List[str]=[], admin1_ids: List[str]=[]) -> List[Region]: '\n Returns a list of admin1s. If country or country_id is specified, return the admin1s only of that country.\n If admin1s or admin1_ids are provided, only those admins are returned.\n If all arguments are empty, all admin1s in the system are returned.\n ' if (country and country_id): raise ValueError('Only one of country, country_id may be specified') if country_id: country_where = f"e_country.node2='{country_id}'" elif country: country_where = f"LOWER(s_country_label.text)='{country.lower()}'" else: country_where = '1=1' admin1_where = region_where_clause('s_admin1_label.text', admin1s, 'e_admin1.node1', admin1_ids) query = f''' SELECT e_admin1.node1 AS admin_id, s_admin1_label.text AS admin, 'Q10864048' AS region_type, e_country.node2 AS country_id, s_country_label.text AS country, e_admin1.node1 as admin1_id, s_admin1_label.text as admin1, NULL as admin2_id, NULL as admin2, NULL as admin3_id, NULL as admin3 FROM edges e_admin1 JOIN edges e_admin1_label JOIN strings s_admin1_label ON (e_admin1_label.id=s_admin1_label.edge_id) ON (e_admin1.node1=e_admin1_label.node1 AND e_admin1_label.label='label') JOIN edges e_country ON (e_country.node1=e_admin1.node1 AND e_country.label='P17') JOIN edges e_country_label JOIN strings s_country_label ON (e_country_label.id=s_country_label.edge_id) ON (e_country.node2=e_country_label.node1 AND e_country_label.label='label') WHERE e_admin1.label='P31' AND e_admin1.node2='Q10864048' AND {country_where} AND {admin1_where} ORDER BY admin1 ''' return _query_regions(query)
def query_admin2s(admin1: Optional[str]=None, admin1_id: Optional[str]=None, admin2s: List[str]=[], admin2_ids: List[str]=[]) -> List[Region]: '\n Returns a list of admin2s. If admin1 or admin1_id is specified, return the admin2s only of that admin1.\n If admin2s or admin2_ids are provided, only those admins are returned.\n If all arguments are empty, all admin2s in the system are returned.\n ' if (admin1 and admin1_id): raise ValueError('Only one of admin1, admin1_id may be specified') if admin1_id: admin1_where = f"e_admin1.node2='{admin1_id}'" elif admin1: admin1_where = f"LOWER(s_admin1_label.text)=LOWER('{admin1}')" else: admin1_where = '1=1' admin2_where = region_where_clause('s_admin2_label.text', admin2s, 'e_admin2.node1', admin2_ids) query = f''' SELECT e_admin2.node1 AS admin_id, s_admin2_label.text AS admin, 'Q13220204' AS region_type, e_country.node2 AS country_id, s_country_label.text AS country, e_admin1.node2 AS admin1_id, s_admin1_label.text AS admin1, e_admin2.node1 AS admin2_id, s_admin2_label.text AS admin2, NULL as admin3_id, NULL as admin3 FROM edges e_admin2 JOIN edges e_admin2_label JOIN strings s_admin2_label ON (e_admin2_label.id=s_admin2_label.edge_id) ON (e_admin2.node1=e_admin2_label.node1 AND e_admin2_label.label='label') JOIN edges e_admin1 ON (e_admin1.node1=e_admin2.node1 AND e_admin1.label='P2006190001') JOIN edges e_admin1_label JOIN strings s_admin1_label ON (e_admin1_label.id=s_admin1_label.edge_id) ON (e_admin1.node2=e_admin1_label.node1 AND e_admin1_label.label='label') JOIN edges e_country ON (e_country.node1=e_admin1.node2 AND e_country.label='P17') JOIN edges e_country_label JOIN strings s_country_label ON (e_country_label.id=s_country_label.edge_id) ON (e_country.node2=e_country_label.node1 AND e_country_label.label='label') WHERE e_admin2.label='P31' AND e_admin2.node2='Q13220204' AND {admin1_where} AND {admin2_where} ORDER BY admin2 ''' return _query_regions(query)
-1,743,948,908,563,666,400
Returns a list of admin2s. If admin1 or admin1_id is specified, return the admin2s only of that admin1. If admin2s or admin2_ids are provided, only those admins are returned. If all arguments are empty, all admin2s in the system are returned.
db/sql/dal/regions.py
query_admin2s
Otamio/datamart-api
python
def query_admin2s(admin1: Optional[str]=None, admin1_id: Optional[str]=None, admin2s: List[str]=[], admin2_ids: List[str]=[]) -> List[Region]: '\n Returns a list of admin2s. If admin1 or admin1_id is specified, return the admin2s only of that admin1.\n If admin2s or admin2_ids are provided, only those admins are returned.\n If all arguments are empty, all admin2s in the system are returned.\n ' if (admin1 and admin1_id): raise ValueError('Only one of admin1, admin1_id may be specified') if admin1_id: admin1_where = f"e_admin1.node2='{admin1_id}'" elif admin1: admin1_where = f"LOWER(s_admin1_label.text)=LOWER('{admin1}')" else: admin1_where = '1=1' admin2_where = region_where_clause('s_admin2_label.text', admin2s, 'e_admin2.node1', admin2_ids) query = f''' SELECT e_admin2.node1 AS admin_id, s_admin2_label.text AS admin, 'Q13220204' AS region_type, e_country.node2 AS country_id, s_country_label.text AS country, e_admin1.node2 AS admin1_id, s_admin1_label.text AS admin1, e_admin2.node1 AS admin2_id, s_admin2_label.text AS admin2, NULL as admin3_id, NULL as admin3 FROM edges e_admin2 JOIN edges e_admin2_label JOIN strings s_admin2_label ON (e_admin2_label.id=s_admin2_label.edge_id) ON (e_admin2.node1=e_admin2_label.node1 AND e_admin2_label.label='label') JOIN edges e_admin1 ON (e_admin1.node1=e_admin2.node1 AND e_admin1.label='P2006190001') JOIN edges e_admin1_label JOIN strings s_admin1_label ON (e_admin1_label.id=s_admin1_label.edge_id) ON (e_admin1.node2=e_admin1_label.node1 AND e_admin1_label.label='label') JOIN edges e_country ON (e_country.node1=e_admin1.node2 AND e_country.label='P17') JOIN edges e_country_label JOIN strings s_country_label ON (e_country_label.id=s_country_label.edge_id) ON (e_country.node2=e_country_label.node1 AND e_country_label.label='label') WHERE e_admin2.label='P31' AND e_admin2.node2='Q13220204' AND {admin1_where} AND {admin2_where} ORDER BY admin2 ''' return _query_regions(query)
def query_admin3s(admin2: Optional[str]=None, admin2_id: Optional[str]=None, admin3s: List[str]=[], admin3_ids: List[str]=[], debug=False) -> List[Region]: '\n Returns a list of admin3s. If admin2 or admin2_id is specified, return the admin3s only of that admin2.\n If admin3s or admin3_ids are provided, only those admins are returned.\n If all arguments are empty, all admin3s in the system are returned.\n ' if (admin2 and admin2_id): raise ValueError('Only one of admin2, admin2_id may be specified') if admin2_id: admin2_where = f"e_admin2.node2='{admin2_id}'" elif admin2: admin2_where = f"LOWER(s_admin2_label.text)=LOWER('{admin2}')" else: admin2_where = '1=1' admin3_where = region_where_clause('s_admin3_label.text', admin3s, 'e_admin3.node1', admin3_ids) query = f''' SELECT e_admin3.node1 AS admin_id, s_admin3_label.text AS admin, 'Q13221722' AS region_type, e_country.node2 AS country_id, s_country_label.text AS country, e_admin1.node2 AS admin1_id, s_admin1_label.text AS admin1, e_admin2.node2 AS admin2_id, s_admin2_label.text AS admin2, e_admin2.node1 AS admin3_id, s_admin3_label.text AS admin3 FROM edges e_admin3 JOIN edges e_admin3_label JOIN strings s_admin3_label ON (e_admin3_label.id=s_admin3_label.edge_id) ON (e_admin3.node1=e_admin3_label.node1 AND e_admin3_label.label='label') JOIN edges e_admin2 ON (e_admin2.node1=e_admin3.node1 AND e_admin2.label='P2006190002') JOIN edges e_admin2_label JOIN strings s_admin2_label ON (e_admin2_label.id=s_admin2_label.edge_id) ON (e_admin2.node2=e_admin2_label.node1 AND e_admin2_label.label='label') JOIN edges e_admin1 ON (e_admin1.node1=e_admin2.node1 AND e_admin1.label='P2006190001') JOIN edges e_admin1_label JOIN strings s_admin1_label ON (e_admin1_label.id=s_admin1_label.edge_id) ON (e_admin1.node2=e_admin1_label.node1 AND e_admin1_label.label='label') JOIN edges e_country ON (e_country.node1=e_admin1.node2 AND e_country.label='P17') JOIN edges e_country_label JOIN strings s_country_label ON (e_country_label.id=s_country_label.edge_id) ON (e_country.node2=e_country_label.node1 AND e_country_label.label='label') WHERE e_admin3.label='P31' AND e_admin3.node2='Q13221722' AND {admin2_where} AND {admin3_where} ORDER BY admin3 ''' if debug: print(query) return _query_regions(query)
-7,311,637,996,641,695,000
Returns a list of admin3s. If admin2 or admin2_id is specified, return the admin3s only of that admin2. If admin3s or admin3_ids are provided, only those admins are returned. If all arguments are empty, all admin3s in the system are returned.
db/sql/dal/regions.py
query_admin3s
Otamio/datamart-api
python
def query_admin3s(admin2: Optional[str]=None, admin2_id: Optional[str]=None, admin3s: List[str]=[], admin3_ids: List[str]=[], debug=False) -> List[Region]: '\n Returns a list of admin3s. If admin2 or admin2_id is specified, return the admin3s only of that admin2.\n If admin3s or admin3_ids are provided, only those admins are returned.\n If all arguments are empty, all admin3s in the system are returned.\n ' if (admin2 and admin2_id): raise ValueError('Only one of admin2, admin2_id may be specified') if admin2_id: admin2_where = f"e_admin2.node2='{admin2_id}'" elif admin2: admin2_where = f"LOWER(s_admin2_label.text)=LOWER('{admin2}')" else: admin2_where = '1=1' admin3_where = region_where_clause('s_admin3_label.text', admin3s, 'e_admin3.node1', admin3_ids) query = f''' SELECT e_admin3.node1 AS admin_id, s_admin3_label.text AS admin, 'Q13221722' AS region_type, e_country.node2 AS country_id, s_country_label.text AS country, e_admin1.node2 AS admin1_id, s_admin1_label.text AS admin1, e_admin2.node2 AS admin2_id, s_admin2_label.text AS admin2, e_admin2.node1 AS admin3_id, s_admin3_label.text AS admin3 FROM edges e_admin3 JOIN edges e_admin3_label JOIN strings s_admin3_label ON (e_admin3_label.id=s_admin3_label.edge_id) ON (e_admin3.node1=e_admin3_label.node1 AND e_admin3_label.label='label') JOIN edges e_admin2 ON (e_admin2.node1=e_admin3.node1 AND e_admin2.label='P2006190002') JOIN edges e_admin2_label JOIN strings s_admin2_label ON (e_admin2_label.id=s_admin2_label.edge_id) ON (e_admin2.node2=e_admin2_label.node1 AND e_admin2_label.label='label') JOIN edges e_admin1 ON (e_admin1.node1=e_admin2.node1 AND e_admin1.label='P2006190001') JOIN edges e_admin1_label JOIN strings s_admin1_label ON (e_admin1_label.id=s_admin1_label.edge_id) ON (e_admin1.node2=e_admin1_label.node1 AND e_admin1_label.label='label') JOIN edges e_country ON (e_country.node1=e_admin1.node2 AND e_country.label='P17') JOIN edges e_country_label JOIN strings s_country_label ON (e_country_label.id=s_country_label.edge_id) ON (e_country.node2=e_country_label.node1 AND e_country_label.label='label') WHERE e_admin3.label='P31' AND e_admin3.node2='Q13221722' AND {admin2_where} AND {admin3_where} ORDER BY admin3 ''' if debug: print(query) return _query_regions(query)
@simple_decorator def error2fault(func): '\n Catch known exceptions and translate them to\n XML-RPC faults.\n ' def catcher(*args): try: return func(*args) except GameError as error: raise xmlrpc.client.Fault(GameError.rpc_code, str(error)) except RuleError as error: raise xmlrpc.client.Fault(RuleError.rpc_code, str(error)) except ProtocolError as error: raise xmlrpc.client.Fault(ProtocolError.rpc_code, str(error)) return catcher
6,223,366,847,108,657,000
Catch known exceptions and translate them to XML-RPC faults.
tupelo/xmlrpc.py
error2fault
jait/tupelo
python
@simple_decorator def error2fault(func): '\n Catch known exceptions and translate them to\n XML-RPC faults.\n ' def catcher(*args): try: return func(*args) except GameError as error: raise xmlrpc.client.Fault(GameError.rpc_code, str(error)) except RuleError as error: raise xmlrpc.client.Fault(RuleError.rpc_code, str(error)) except ProtocolError as error: raise xmlrpc.client.Fault(ProtocolError.rpc_code, str(error)) return catcher
@simple_decorator def fault2error(func): '\n Catch known XML-RPC faults and translate them to\n custom exceptions.\n ' def catcher(*args): try: return func(*args) except xmlrpc.client.Fault as error: error_classes = (GameError, RuleError, ProtocolError) for klass in error_classes: if (error.faultCode == klass.rpc_code): raise klass(error.faultString) raise error return catcher
550,723,065,045,873,660
Catch known XML-RPC faults and translate them to custom exceptions.
tupelo/xmlrpc.py
fault2error
jait/tupelo
python
@simple_decorator def fault2error(func): '\n Catch known XML-RPC faults and translate them to\n custom exceptions.\n ' def catcher(*args): try: return func(*args) except xmlrpc.client.Fault as error: error_classes = (GameError, RuleError, ProtocolError) for klass in error_classes: if (error.faultCode == klass.rpc_code): raise klass(error.faultString) raise error return catcher
def wait_for_turn(self): "\n Wait for this player's turn.\n " while True: time.sleep(0.5) if (self.controller is not None): events = self.controller.get_events(self.id) for event in events: self.handle_event(event) if (self.game_state.turn_id == self.id): break
-4,054,009,647,122,734,600
Wait for this player's turn.
tupelo/xmlrpc.py
wait_for_turn
jait/tupelo
python
def wait_for_turn(self): "\n \n " while True: time.sleep(0.5) if (self.controller is not None): events = self.controller.get_events(self.id) for event in events: self.handle_event(event) if (self.game_state.turn_id == self.id): break
def add_collision_mesh(self, collision_mesh, options=None): 'Add a collision mesh to the planning scene.\n\n Parameters\n ----------\n collision_mesh : :class:`compas_fab.robots.CollisionMesh`\n Object containing the collision mesh to be added.\n options : dict, optional\n Unused parameter.\n\n Returns\n -------\n ``None``\n ' kwargs = {} kwargs['collision_mesh'] = collision_mesh kwargs['errback_name'] = 'errback' return await_callback(self.add_collision_mesh_async, **kwargs)
-7,232,787,266,132,847,000
Add a collision mesh to the planning scene. Parameters ---------- collision_mesh : :class:`compas_fab.robots.CollisionMesh` Object containing the collision mesh to be added. options : dict, optional Unused parameter. Returns ------- ``None``
src/compas_fab/backends/ros/backend_features/move_it_add_collision_mesh.py
add_collision_mesh
gramaziokohler/compas_fab
python
def add_collision_mesh(self, collision_mesh, options=None): 'Add a collision mesh to the planning scene.\n\n Parameters\n ----------\n collision_mesh : :class:`compas_fab.robots.CollisionMesh`\n Object containing the collision mesh to be added.\n options : dict, optional\n Unused parameter.\n\n Returns\n -------\n ``None``\n ' kwargs = {} kwargs['collision_mesh'] = collision_mesh kwargs['errback_name'] = 'errback' return await_callback(self.add_collision_mesh_async, **kwargs)
def coinChange(self, coins, amount): '\n :type coins: List[int]\n :type amount: int\n :rtype: int\n ' res = ([(amount + 1)] * (amount + 1)) res[0] = 0 for i in range(1, (amount + 1)): for j in coins: if (j <= i): res[i] = min(res[i], (res[(i - j)] + 1)) if (res[amount] > amount): return (- 1) else: return res[amount]
8,912,028,627,762,102,000
:type coins: List[int] :type amount: int :rtype: int
Session1_2018/coinChange.py
coinChange
vedantc6/LCode
python
def coinChange(self, coins, amount): '\n :type coins: List[int]\n :type amount: int\n :rtype: int\n ' res = ([(amount + 1)] * (amount + 1)) res[0] = 0 for i in range(1, (amount + 1)): for j in coins: if (j <= i): res[i] = min(res[i], (res[(i - j)] + 1)) if (res[amount] > amount): return (- 1) else: return res[amount]
@interpolate_doc def func(): '\n this is a docstring\n\n {interpolate_example.foo}\n\n {bar}\n\n {Foo!K}\n '
6,285,249,781,807,159,000
this is a docstring {interpolate_example.foo} {bar} {Foo!K}
interpolate_example.py
func
anntzer/structured-docstrings
python
@interpolate_doc def func(): '\n this is a docstring\n\n {interpolate_example.foo}\n\n {bar}\n\n {Foo!K}\n '
@interpolate_doc def bad_doc(): '\n fields {must} be preceded by whitespace\n '
6,683,463,889,826,569,000
fields {must} be preceded by whitespace
interpolate_example.py
bad_doc
anntzer/structured-docstrings
python
@interpolate_doc def bad_doc(): '\n \n '
@pytest.mark.django_db def test_force_staff_sso(client): 'Test that URLs and redirects are in place.' settings.FEATURE_FLAGS['ENFORCE_STAFF_SSO_ON'] = True settings.AUTHBROKER_CLIENT_ID = 'debug' settings.AUTHBROKER_CLIENT_SECRET = 'debug' settings.AUTHBROKER_URL = 'https://test.com' reload_urlconf() assert (reverse('authbroker_client:login') == '/auth/login/') assert (reverse('authbroker_client:callback') == '/auth/callback/') response = client.get('/admin/login/') assert (response.status_code == 302) assert (response.url == '/auth/login/') settings.FEATURE_FLAGS['ENFORCE_STAFF_SSO_ON'] = False reload_urlconf()
1,363,510,666,531,336,200
Test that URLs and redirects are in place.
tests/users/test_views.py
test_force_staff_sso
uktrade/directory-cms
python
@pytest.mark.django_db def test_force_staff_sso(client): settings.FEATURE_FLAGS['ENFORCE_STAFF_SSO_ON'] = True settings.AUTHBROKER_CLIENT_ID = 'debug' settings.AUTHBROKER_CLIENT_SECRET = 'debug' settings.AUTHBROKER_URL = 'https://test.com' reload_urlconf() assert (reverse('authbroker_client:login') == '/auth/login/') assert (reverse('authbroker_client:callback') == '/auth/callback/') response = client.get('/admin/login/') assert (response.status_code == 302) assert (response.url == '/auth/login/') settings.FEATURE_FLAGS['ENFORCE_STAFF_SSO_ON'] = False reload_urlconf()
def __init__(self, db_conn): '\n init\n :return:\n ' self.parseDbConn(db_conn) self.__initDbClient()
-2,465,099,712,075,066,400
init :return:
db/dbClient.py
__init__
dota2heqiuzhi/proxy_pool
python
def __init__(self, db_conn): '\n init\n :return:\n ' self.parseDbConn(db_conn) self.__initDbClient()
def __initDbClient(self): '\n init DB Client\n :return:\n ' __type = None if ('SSDB' == self.db_type): __type = 'ssdbClient' elif ('REDIS' == self.db_type): __type = 'redisClient' elif ('POSTGRESQL' == self.db_type): __type = 'postgresqlClient' else: pass assert __type, 'type error, Not support DB type: {}'.format(self.db_type) self.client = getattr(__import__(__type), ('%sClient' % self.db_type.title()))(host=self.db_host, port=self.db_port, username=self.db_user, password=self.db_pwd, db=self.db_name)
-8,341,425,307,030,236,000
init DB Client :return:
db/dbClient.py
__initDbClient
dota2heqiuzhi/proxy_pool
python
def __initDbClient(self): '\n init DB Client\n :return:\n ' __type = None if ('SSDB' == self.db_type): __type = 'ssdbClient' elif ('REDIS' == self.db_type): __type = 'redisClient' elif ('POSTGRESQL' == self.db_type): __type = 'postgresqlClient' else: pass assert __type, 'type error, Not support DB type: {}'.format(self.db_type) self.client = getattr(__import__(__type), ('%sClient' % self.db_type.title()))(host=self.db_host, port=self.db_port, username=self.db_user, password=self.db_pwd, db=self.db_name)
def testV1alpha1PriorityClass(self): '\n Test V1alpha1PriorityClass\n ' pass
8,672,129,437,725,520,000
Test V1alpha1PriorityClass
kubernetes/test/test_v1alpha1_priority_class.py
testV1alpha1PriorityClass
MiaoRachelYu/python
python
def testV1alpha1PriorityClass(self): '\n \n ' pass
def fit(self, Xi_train, Xv_train, y_train, Xi_valid=None, Xv_valid=None, y_valid=None, early_stopping=False, refit=False): '\n :param Xi_train: [[ind1_1, ind1_2, ...], [ind2_1, ind2_2, ...], ..., [indi_1, indi_2, ..., indi_j, ...], ...]\n indi_j is the feature index of feature field j of sample i in the training set\n :param Xv_train: [[val1_1, val1_2, ...], [val2_1, val2_2, ...], ..., [vali_1, vali_2, ..., vali_j, ...], ...]\n vali_j is the feature value of feature field j of sample i in the training set\n vali_j can be either binary (1/0, for binary/categorical features) or float (e.g., 10.24, for numerical features)\n :param y_train: label of each sample in the training set\n :param Xi_valid: list of list of feature indices of each sample in the validation set\n :param Xv_valid: list of list of feature values of each sample in the validation set\n :param y_valid: label of each sample in the validation set\n :param early_stopping: perform early stopping or not\n :param refit: refit the model on the train+valid dataset or not\n :return: None\n ' has_valid = (Xv_valid is not None) Xi_train = Xi_train.copy() Xv_train = Xv_train.copy() y_train = y_train.copy() for epoch in range(self.epoch): t1 = time() self.shuffle_in_unison_scary(Xi_train, Xv_train, y_train) total_batch = int((len(y_train) / self.batch_size)) for i in range(total_batch): (Xi_batch, Xv_batch, y_batch) = self.get_batch(Xi_train, Xv_train, y_train, self.batch_size, i) (trian_out, train_loss) = self.fit_on_batch(Xi_batch, Xv_batch, y_batch) if ((i % 1000) == 0): print(('epoch:%d batch:%d train_loss=%.4f' % (epoch, i, train_loss)), file=sys.stderr) train_me = self.evaluate(Xi_train, Xv_train, y_train) self.train_result.append(train_me) if has_valid: valid_me = self.evaluate(Xi_valid, Xv_valid, y_valid) self.valid_result.append(valid_me) if ((self.verbose > 0) and ((epoch % self.verbose) == 0)): print(('[%d] [train] auc=%.4f acc=%.4f mse=%.4f precision_1=%.4f recall_1=%.4f [%.1f s]' % ((epoch + 1), train_me['auc'], train_me['acc'], train_me['mse'], train_me['precision_1'], train_me['recall_1'], (time() - t1)))) if has_valid: print(('[%d] [valid] auc=%.4f acc=%.4f mse=%.4f precision_1=%.4f recall_1=%.4f [%.1f s]' % ((epoch + 1), valid_me['auc'], valid_me['acc'], valid_me['mse'], valid_me['precision_1'], valid_me['recall_1'], (time() - t1)))) if (has_valid and early_stopping and self.training_termination(self.valid_result)): break if (has_valid and refit): if self.greater_is_better: best_valid_score = max(self.valid_result) else: best_valid_score = min(self.valid_result) best_epoch = self.valid_result.index(best_valid_score) best_train_score = self.train_result[best_epoch] Xi_train = (Xi_train + Xi_valid) Xv_train = (Xv_train + Xv_valid) y_train = (y_train + y_valid) for epoch in range(100): self.shuffle_in_unison_scary(Xi_train, Xv_train, y_train) total_batch = int((len(y_train) / self.batch_size)) for i in range(total_batch): (Xi_batch, Xv_batch, y_batch) = self.get_batch(Xi_train, Xv_train, y_train, self.batch_size, i) self.fit_on_batch(Xi_batch, Xv_batch, y_batch) train_result = self.evaluate(Xi_train, Xv_train, y_train) if ((abs((train_result - best_train_score)) < 0.001) or (self.greater_is_better and (train_result > best_train_score)) or ((not self.greater_is_better) and (train_result < best_train_score))): break
4,153,447,849,296,950,300
:param Xi_train: [[ind1_1, ind1_2, ...], [ind2_1, ind2_2, ...], ..., [indi_1, indi_2, ..., indi_j, ...], ...] indi_j is the feature index of feature field j of sample i in the training set :param Xv_train: [[val1_1, val1_2, ...], [val2_1, val2_2, ...], ..., [vali_1, vali_2, ..., vali_j, ...], ...] vali_j is the feature value of feature field j of sample i in the training set vali_j can be either binary (1/0, for binary/categorical features) or float (e.g., 10.24, for numerical features) :param y_train: label of each sample in the training set :param Xi_valid: list of list of feature indices of each sample in the validation set :param Xv_valid: list of list of feature values of each sample in the validation set :param y_valid: label of each sample in the validation set :param early_stopping: perform early stopping or not :param refit: refit the model on the train+valid dataset or not :return: None
zzh/mllib/model/_deep_fm.py
fit
zhangzhenhu/zzh
python
def fit(self, Xi_train, Xv_train, y_train, Xi_valid=None, Xv_valid=None, y_valid=None, early_stopping=False, refit=False): '\n :param Xi_train: [[ind1_1, ind1_2, ...], [ind2_1, ind2_2, ...], ..., [indi_1, indi_2, ..., indi_j, ...], ...]\n indi_j is the feature index of feature field j of sample i in the training set\n :param Xv_train: [[val1_1, val1_2, ...], [val2_1, val2_2, ...], ..., [vali_1, vali_2, ..., vali_j, ...], ...]\n vali_j is the feature value of feature field j of sample i in the training set\n vali_j can be either binary (1/0, for binary/categorical features) or float (e.g., 10.24, for numerical features)\n :param y_train: label of each sample in the training set\n :param Xi_valid: list of list of feature indices of each sample in the validation set\n :param Xv_valid: list of list of feature values of each sample in the validation set\n :param y_valid: label of each sample in the validation set\n :param early_stopping: perform early stopping or not\n :param refit: refit the model on the train+valid dataset or not\n :return: None\n ' has_valid = (Xv_valid is not None) Xi_train = Xi_train.copy() Xv_train = Xv_train.copy() y_train = y_train.copy() for epoch in range(self.epoch): t1 = time() self.shuffle_in_unison_scary(Xi_train, Xv_train, y_train) total_batch = int((len(y_train) / self.batch_size)) for i in range(total_batch): (Xi_batch, Xv_batch, y_batch) = self.get_batch(Xi_train, Xv_train, y_train, self.batch_size, i) (trian_out, train_loss) = self.fit_on_batch(Xi_batch, Xv_batch, y_batch) if ((i % 1000) == 0): print(('epoch:%d batch:%d train_loss=%.4f' % (epoch, i, train_loss)), file=sys.stderr) train_me = self.evaluate(Xi_train, Xv_train, y_train) self.train_result.append(train_me) if has_valid: valid_me = self.evaluate(Xi_valid, Xv_valid, y_valid) self.valid_result.append(valid_me) if ((self.verbose > 0) and ((epoch % self.verbose) == 0)): print(('[%d] [train] auc=%.4f acc=%.4f mse=%.4f precision_1=%.4f recall_1=%.4f [%.1f s]' % ((epoch + 1), train_me['auc'], train_me['acc'], train_me['mse'], train_me['precision_1'], train_me['recall_1'], (time() - t1)))) if has_valid: print(('[%d] [valid] auc=%.4f acc=%.4f mse=%.4f precision_1=%.4f recall_1=%.4f [%.1f s]' % ((epoch + 1), valid_me['auc'], valid_me['acc'], valid_me['mse'], valid_me['precision_1'], valid_me['recall_1'], (time() - t1)))) if (has_valid and early_stopping and self.training_termination(self.valid_result)): break if (has_valid and refit): if self.greater_is_better: best_valid_score = max(self.valid_result) else: best_valid_score = min(self.valid_result) best_epoch = self.valid_result.index(best_valid_score) best_train_score = self.train_result[best_epoch] Xi_train = (Xi_train + Xi_valid) Xv_train = (Xv_train + Xv_valid) y_train = (y_train + y_valid) for epoch in range(100): self.shuffle_in_unison_scary(Xi_train, Xv_train, y_train) total_batch = int((len(y_train) / self.batch_size)) for i in range(total_batch): (Xi_batch, Xv_batch, y_batch) = self.get_batch(Xi_train, Xv_train, y_train, self.batch_size, i) self.fit_on_batch(Xi_batch, Xv_batch, y_batch) train_result = self.evaluate(Xi_train, Xv_train, y_train) if ((abs((train_result - best_train_score)) < 0.001) or (self.greater_is_better and (train_result > best_train_score)) or ((not self.greater_is_better) and (train_result < best_train_score))): break
def predict(self, Xi, Xv): '\n :param Xi: list of list of feature indices of each sample in the dataset\n :param Xv: list of list of feature values of each sample in the dataset\n :return: predicted probability of each sample\n ' dummy_y = ([1] * len(Xi)) batch_index = 0 (Xi_batch, Xv_batch, y_batch) = self.get_batch(Xi, Xv, dummy_y, self.batch_size, batch_index) y_pred = None while (len(Xi_batch) > 0): num_batch = len(y_batch) feed_dict = {self.feat_index: Xi_batch, self.feat_value: Xv_batch, self.dropout_keep_fm: ([1.0] * len(self.dropout_fm)), self.dropout_keep_deep: ([1.0] * len(self.dropout_deep)), self.train_phase: False} batch_out = self.sess.run(self.out, feed_dict=feed_dict) if (batch_index == 0): y_pred = np.reshape(batch_out, (num_batch,)) else: y_pred = np.concatenate((y_pred, np.reshape(batch_out, (num_batch,)))) batch_index += 1 (Xi_batch, Xv_batch, y_batch) = self.get_batch(Xi, Xv, dummy_y, self.batch_size, batch_index) return y_pred
4,152,048,524,689,723,000
:param Xi: list of list of feature indices of each sample in the dataset :param Xv: list of list of feature values of each sample in the dataset :return: predicted probability of each sample
zzh/mllib/model/_deep_fm.py
predict
zhangzhenhu/zzh
python
def predict(self, Xi, Xv): '\n :param Xi: list of list of feature indices of each sample in the dataset\n :param Xv: list of list of feature values of each sample in the dataset\n :return: predicted probability of each sample\n ' dummy_y = ([1] * len(Xi)) batch_index = 0 (Xi_batch, Xv_batch, y_batch) = self.get_batch(Xi, Xv, dummy_y, self.batch_size, batch_index) y_pred = None while (len(Xi_batch) > 0): num_batch = len(y_batch) feed_dict = {self.feat_index: Xi_batch, self.feat_value: Xv_batch, self.dropout_keep_fm: ([1.0] * len(self.dropout_fm)), self.dropout_keep_deep: ([1.0] * len(self.dropout_deep)), self.train_phase: False} batch_out = self.sess.run(self.out, feed_dict=feed_dict) if (batch_index == 0): y_pred = np.reshape(batch_out, (num_batch,)) else: y_pred = np.concatenate((y_pred, np.reshape(batch_out, (num_batch,)))) batch_index += 1 (Xi_batch, Xv_batch, y_batch) = self.get_batch(Xi, Xv, dummy_y, self.batch_size, batch_index) return y_pred
def evaluate(self, Xi, Xv, y_true): '\n :param Xi: list of list of feature indices of each sample in the dataset\n :param Xv: list of list of feature values of each sample in the dataset\n :param y: label of each sample in the dataset\n :return: metric of the evaluation\n ' size = y_true.shape[0] y_pred = self.predict(Xi, Xv) error = (y_true - y_pred) mse = ((error * error).sum() / size) y_pred_m = y_pred.copy() y_pred_m[(y_pred_m >= self.threshold)] = 1 y_pred_m[(y_pred_m < self.threshold)] = 0 cm = metrics.confusion_matrix(y_true, y_pred_m, labels=[1, 0]) real_1_count = cm[0, :].sum() predict_1_count = cm[:, 0].sum() right_1_count = cm[(0, 0)] if (predict_1_count == 0): precision_1 = 0 else: precision_1 = (right_1_count / predict_1_count) if (real_1_count == 0): recall_1 = 0 else: recall_1 = (right_1_count / real_1_count) return {'size': size, 'acc': ((cm[(0, 0)] + cm[(1, 1)]) / size), 'precision_1': precision_1, 'recall_1': recall_1, 'auc': self.eval_metric(y_true, y_pred), 'mse': mse}
7,261,298,830,425,360,000
:param Xi: list of list of feature indices of each sample in the dataset :param Xv: list of list of feature values of each sample in the dataset :param y: label of each sample in the dataset :return: metric of the evaluation
zzh/mllib/model/_deep_fm.py
evaluate
zhangzhenhu/zzh
python
def evaluate(self, Xi, Xv, y_true): '\n :param Xi: list of list of feature indices of each sample in the dataset\n :param Xv: list of list of feature values of each sample in the dataset\n :param y: label of each sample in the dataset\n :return: metric of the evaluation\n ' size = y_true.shape[0] y_pred = self.predict(Xi, Xv) error = (y_true - y_pred) mse = ((error * error).sum() / size) y_pred_m = y_pred.copy() y_pred_m[(y_pred_m >= self.threshold)] = 1 y_pred_m[(y_pred_m < self.threshold)] = 0 cm = metrics.confusion_matrix(y_true, y_pred_m, labels=[1, 0]) real_1_count = cm[0, :].sum() predict_1_count = cm[:, 0].sum() right_1_count = cm[(0, 0)] if (predict_1_count == 0): precision_1 = 0 else: precision_1 = (right_1_count / predict_1_count) if (real_1_count == 0): recall_1 = 0 else: recall_1 = (right_1_count / real_1_count) return {'size': size, 'acc': ((cm[(0, 0)] + cm[(1, 1)]) / size), 'precision_1': precision_1, 'recall_1': recall_1, 'auc': self.eval_metric(y_true, y_pred), 'mse': mse}
def test_create_valid_user_successful(self): 'Test creating user with valid payload is successful' payload = {'email': '[email protected]', 'password': 'testpass', 'name': 'John Doe'} res = self.client.post(CREATE_USER_URL, payload) self.assertEqual(res.status_code, status.HTTP_201_CREATED) user = get_user_model().objects.get(**res.data) self.assertTrue(user.check_password(payload['password'])) self.assertNotIn('password', res.data)
-2,702,256,109,293,972,000
Test creating user with valid payload is successful
app/user/tests/test_user_api.py
test_create_valid_user_successful
reallyusefulengine/django_rest_recipe
python
def test_create_valid_user_successful(self): payload = {'email': '[email protected]', 'password': 'testpass', 'name': 'John Doe'} res = self.client.post(CREATE_USER_URL, payload) self.assertEqual(res.status_code, status.HTTP_201_CREATED) user = get_user_model().objects.get(**res.data) self.assertTrue(user.check_password(payload['password'])) self.assertNotIn('password', res.data)
def test_password_too_short(self): 'tests that the password must be more than 5 characters' payload = {'email': '[email protected]', 'password': 'pass', 'name': 'John Doe'} res = self.client.post(CREATE_USER_URL, payload) self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST) user_exists = get_user_model().objects.filter(email=payload['email']).exists() self.assertFalse(user_exists)
5,859,076,869,366,854,000
tests that the password must be more than 5 characters
app/user/tests/test_user_api.py
test_password_too_short
reallyusefulengine/django_rest_recipe
python
def test_password_too_short(self): payload = {'email': '[email protected]', 'password': 'pass', 'name': 'John Doe'} res = self.client.post(CREATE_USER_URL, payload) self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST) user_exists = get_user_model().objects.filter(email=payload['email']).exists() self.assertFalse(user_exists)
def test_create_token_for_user(self): 'Test that a token is created for a user' payload = {'email': '[email protected]', 'password': 'testpass'} create_user(**payload) res = self.client.post(TOKEN_URL, payload) self.assertTrue(res.status_code, status.HTTP_200_OK) self.assertIn('token', res.data)
-590,296,323,168,376,700
Test that a token is created for a user
app/user/tests/test_user_api.py
test_create_token_for_user
reallyusefulengine/django_rest_recipe
python
def test_create_token_for_user(self): payload = {'email': '[email protected]', 'password': 'testpass'} create_user(**payload) res = self.client.post(TOKEN_URL, payload) self.assertTrue(res.status_code, status.HTTP_200_OK) self.assertIn('token', res.data)
def test_create_token_invalid_credentials(self): 'Test that token is not created if invalid credentials are given' create_user(email='[email protected]', password='testpass') payload = {'email': '[email protected]', 'password': 'wrong'} res = self.client.post(TOKEN_URL, payload) self.assertTrue(res.status_code, status.HTTP_400_BAD_REQUEST) self.assertNotIn('token', res.data)
-7,467,095,345,633,051,000
Test that token is not created if invalid credentials are given
app/user/tests/test_user_api.py
test_create_token_invalid_credentials
reallyusefulengine/django_rest_recipe
python
def test_create_token_invalid_credentials(self): create_user(email='[email protected]', password='testpass') payload = {'email': '[email protected]', 'password': 'wrong'} res = self.client.post(TOKEN_URL, payload) self.assertTrue(res.status_code, status.HTTP_400_BAD_REQUEST) self.assertNotIn('token', res.data)
def test_create_token_no_user(self): 'Test that token is not created if user does not exist' payload = {'email': '[email protected]', 'password': 'wrong'} res = self.client.post(TOKEN_URL, payload) self.assertTrue(res.status_code, status.HTTP_400_BAD_REQUEST) self.assertNotIn('token', res.data)
-6,124,620,769,133,167,000
Test that token is not created if user does not exist
app/user/tests/test_user_api.py
test_create_token_no_user
reallyusefulengine/django_rest_recipe
python
def test_create_token_no_user(self): payload = {'email': '[email protected]', 'password': 'wrong'} res = self.client.post(TOKEN_URL, payload) self.assertTrue(res.status_code, status.HTTP_400_BAD_REQUEST) self.assertNotIn('token', res.data)
def test_create_token_no_missing_field(self): 'Test that token is not created if email/password not given' res = self.client.post(TOKEN_URL, {'email': '[email protected]', 'password': ''}) self.assertTrue(res.status_code, status.HTTP_400_BAD_REQUEST) self.assertNotIn('token', res.data)
-2,498,119,025,504,459,000
Test that token is not created if email/password not given
app/user/tests/test_user_api.py
test_create_token_no_missing_field
reallyusefulengine/django_rest_recipe
python
def test_create_token_no_missing_field(self): res = self.client.post(TOKEN_URL, {'email': '[email protected]', 'password': ''}) self.assertTrue(res.status_code, status.HTTP_400_BAD_REQUEST) self.assertNotIn('token', res.data)
def _softplus(x): 'Implements the softplus function.' return torch.nn.functional.softplus(x, beta=1, threshold=10000)
-8,304,163,192,589,108,000
Implements the softplus function.
utils/inverter.py
_softplus
Twizwei/idinvert_pytorch
python
def _softplus(x): return torch.nn.functional.softplus(x, beta=1, threshold=10000)
def _get_tensor_value(tensor): 'Gets the value of a torch Tensor.' return tensor.cpu().detach().numpy()
5,971,145,564,746,899,000
Gets the value of a torch Tensor.
utils/inverter.py
_get_tensor_value
Twizwei/idinvert_pytorch
python
def _get_tensor_value(tensor): return tensor.cpu().detach().numpy()
def __init__(self, model_name, learning_rate=0.01, iteration=100, reconstruction_loss_weight=1.0, perceptual_loss_weight=5e-05, regularization_loss_weight=2.0, logger=None): 'Initializes the inverter.\n\n NOTE: Only Adam optimizer is supported in the optimization process.\n\n Args:\n model_name: Name of the model on which the inverted is based. The model\n should be first registered in `models/model_settings.py`.\n logger: Logger to record the log message.\n learning_rate: Learning rate for optimization. (default: 1e-2)\n iteration: Number of iterations for optimization. (default: 100)\n reconstruction_loss_weight: Weight for reconstruction loss. Should always\n be a positive number. (default: 1.0)\n perceptual_loss_weight: Weight for perceptual loss. 0 disables perceptual\n loss. (default: 5e-5)\n regularization_loss_weight: Weight for regularization loss from encoder.\n This is essential for in-domain inversion. However, this loss will\n automatically ignored if the generative model does not include a valid\n encoder. 0 disables regularization loss. (default: 2.0)\n ' self.logger = logger self.model_name = model_name self.gan_type = 'stylegan' self.G = StyleGANGenerator(self.model_name, self.logger) self.E = StyleGANEncoder(self.model_name, self.logger) self.F = PerceptualModel(min_val=self.G.min_val, max_val=self.G.max_val) self.encode_dim = [self.G.num_layers, self.G.w_space_dim] self.run_device = self.G.run_device assert (list(self.encode_dim) == list(self.E.encode_dim)) assert (self.G.gan_type == self.gan_type) assert (self.E.gan_type == self.gan_type) self.learning_rate = learning_rate self.iteration = iteration self.loss_pix_weight = reconstruction_loss_weight self.loss_feat_weight = perceptual_loss_weight self.loss_reg_weight = regularization_loss_weight assert (self.loss_pix_weight > 0)
4,956,284,454,006,761,000
Initializes the inverter. NOTE: Only Adam optimizer is supported in the optimization process. Args: model_name: Name of the model on which the inverted is based. The model should be first registered in `models/model_settings.py`. logger: Logger to record the log message. learning_rate: Learning rate for optimization. (default: 1e-2) iteration: Number of iterations for optimization. (default: 100) reconstruction_loss_weight: Weight for reconstruction loss. Should always be a positive number. (default: 1.0) perceptual_loss_weight: Weight for perceptual loss. 0 disables perceptual loss. (default: 5e-5) regularization_loss_weight: Weight for regularization loss from encoder. This is essential for in-domain inversion. However, this loss will automatically ignored if the generative model does not include a valid encoder. 0 disables regularization loss. (default: 2.0)
utils/inverter.py
__init__
Twizwei/idinvert_pytorch
python
def __init__(self, model_name, learning_rate=0.01, iteration=100, reconstruction_loss_weight=1.0, perceptual_loss_weight=5e-05, regularization_loss_weight=2.0, logger=None): 'Initializes the inverter.\n\n NOTE: Only Adam optimizer is supported in the optimization process.\n\n Args:\n model_name: Name of the model on which the inverted is based. The model\n should be first registered in `models/model_settings.py`.\n logger: Logger to record the log message.\n learning_rate: Learning rate for optimization. (default: 1e-2)\n iteration: Number of iterations for optimization. (default: 100)\n reconstruction_loss_weight: Weight for reconstruction loss. Should always\n be a positive number. (default: 1.0)\n perceptual_loss_weight: Weight for perceptual loss. 0 disables perceptual\n loss. (default: 5e-5)\n regularization_loss_weight: Weight for regularization loss from encoder.\n This is essential for in-domain inversion. However, this loss will\n automatically ignored if the generative model does not include a valid\n encoder. 0 disables regularization loss. (default: 2.0)\n ' self.logger = logger self.model_name = model_name self.gan_type = 'stylegan' self.G = StyleGANGenerator(self.model_name, self.logger) self.E = StyleGANEncoder(self.model_name, self.logger) self.F = PerceptualModel(min_val=self.G.min_val, max_val=self.G.max_val) self.encode_dim = [self.G.num_layers, self.G.w_space_dim] self.run_device = self.G.run_device assert (list(self.encode_dim) == list(self.E.encode_dim)) assert (self.G.gan_type == self.gan_type) assert (self.E.gan_type == self.gan_type) self.learning_rate = learning_rate self.iteration = iteration self.loss_pix_weight = reconstruction_loss_weight self.loss_feat_weight = perceptual_loss_weight self.loss_reg_weight = regularization_loss_weight assert (self.loss_pix_weight > 0)
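A minimal construction sketch for the inverter above. The class name `StyleGANInverter` and the model name 'styleganinv_ffhq256' are assumptions for illustration (neither is shown in this record); only the keyword arguments come from the `__init__` signature above.

```python
# Hypothetical sketch; the class name and model name are assumptions.
from utils.inverter import StyleGANInverter

inverter = StyleGANInverter(
    model_name='styleganinv_ffhq256',    # must be registered in models/model_settings.py
    learning_rate=0.01,                  # Adam learning rate (default 1e-2)
    iteration=100,                       # number of optimization steps
    reconstruction_loss_weight=1.0,      # must be > 0 (asserted in __init__)
    perceptual_loss_weight=5e-05,        # 0 disables perceptual loss
    regularization_loss_weight=2.0,      # 0 disables the encoder regularizer
)
```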
def preprocess(self, image): 'Preprocesses a single image.\n\n This function assumes the input numpy array is with shape [height, width,\n channel], channel order `RGB`, and pixel range [0, 255].\n\n The returned image is with shape [channel, new_height, new_width], where\n `new_height` and `new_width` are specified by the given generative model.\n The channel order of returned image is also specified by the generative\n model. The pixel range is shifted to [min_val, max_val], where `min_val` and\n `max_val` are also specified by the generative model.\n ' if (not isinstance(image, np.ndarray)): raise ValueError(f'Input image should be with type `numpy.ndarray`!') if (image.dtype != np.uint8): raise ValueError(f'Input image should be with dtype `numpy.uint8`!') if ((image.ndim != 3) or (image.shape[2] not in [1, 3])): raise ValueError(f'''Input should be with shape [height, width, channel], where channel equals to 1 or 3! But {image.shape} is received!''') if ((image.shape[2] == 1) and (self.G.image_channels == 3)): image = np.tile(image, (1, 1, 3)) if (image.shape[2] != self.G.image_channels): raise ValueError(f'Number of channels of input image, which is {image.shape[2]}, is not supported by the current inverter, which requires {self.G.image_channels} channels!') if ((self.G.image_channels == 3) and (self.G.channel_order == 'BGR')): image = image[:, :, ::(- 1)] if (image.shape[1:3] != [self.G.resolution, self.G.resolution]): image = cv2.resize(image, (self.G.resolution, self.G.resolution)) image = image.astype(np.float32) image = (((image / 255.0) * (self.G.max_val - self.G.min_val)) + self.G.min_val) image = image.astype(np.float32).transpose(2, 0, 1) return image
8,297,031,020,724,259,000
Preprocesses a single image. This function assumes the input numpy array is with shape [height, width, channel], channel order `RGB`, and pixel range [0, 255]. The returned image is with shape [channel, new_height, new_width], where `new_height` and `new_width` are specified by the given generative model. The channel order of returned image is also specified by the generative model. The pixel range is shifted to [min_val, max_val], where `min_val` and `max_val` are also specified by the generative model.
utils/inverter.py
preprocess
Twizwei/idinvert_pytorch
python
def preprocess(self, image): 'Preprocesses a single image.\n\n This function assumes the input numpy array is with shape [height, width,\n channel], channel order `RGB`, and pixel range [0, 255].\n\n The returned image is with shape [channel, new_height, new_width], where\n `new_height` and `new_width` are specified by the given generative model.\n The channel order of returned image is also specified by the generative\n model. The pixel range is shifted to [min_val, max_val], where `min_val` and\n `max_val` are also specified by the generative model.\n ' if (not isinstance(image, np.ndarray)): raise ValueError(f'Input image should be with type `numpy.ndarray`!') if (image.dtype != np.uint8): raise ValueError(f'Input image should be with dtype `numpy.uint8`!') if ((image.ndim != 3) or (image.shape[2] not in [1, 3])): raise ValueError(f'Input should be with shape [height, width, channel], where channel equals to 1 or 3! But {image.shape} is received!') if ((image.shape[2] == 1) and (self.G.image_channels == 3)): image = np.tile(image, (1, 1, 3)) if (image.shape[2] != self.G.image_channels): raise ValueError(f'Number of channels of input image, which is {image.shape[2]}, is not supported by the current inverter, which requires {self.G.image_channels} channels!') if ((self.G.image_channels == 3) and (self.G.channel_order == 'BGR')): image = image[:, :, ::(- 1)] if (image.shape[1:3] != [self.G.resolution, self.G.resolution]): image = cv2.resize(image, (self.G.resolution, self.G.resolution)) image = image.astype(np.float32) image = (((image / 255.0) * (self.G.max_val - self.G.min_val)) + self.G.min_val) image = image.astype(np.float32).transpose(2, 0, 1) return image
def get_init_code(self, image): 'Gets initial latent codes as the start point for optimization.\n\n The input image is assumed to have already been preprocessed, meaning to\n have shape [self.G.image_channels, self.G.resolution, self.G.resolution],\n channel order `self.G.channel_order`, and pixel range [self.G.min_val,\n self.G.max_val].\n ' x = image[np.newaxis] x = self.G.to_tensor(x.astype(np.float32)) z = _get_tensor_value(self.E.net(x).view(1, *self.encode_dim)) return z.astype(np.float32)
-7,680,309,811,430,556,000
Gets initial latent codes as the start point for optimization. The input image is assumed to have already been preprocessed, meaning to have shape [self.G.image_channels, self.G.resolution, self.G.resolution], channel order `self.G.channel_order`, and pixel range [self.G.min_val, self.G.max_val].
utils/inverter.py
get_init_code
Twizwei/idinvert_pytorch
python
def get_init_code(self, image): 'Gets initial latent codes as the start point for optimization.\n\n The input image is assumed to have already been preprocessed, meaning to\n have shape [self.G.image_channels, self.G.resolution, self.G.resolution],\n channel order `self.G.channel_order`, and pixel range [self.G.min_val,\n self.G.max_val].\n ' x = image[np.newaxis] x = self.G.to_tensor(x.astype(np.float32)) z = _get_tensor_value(self.E.net(x).view(1, *self.encode_dim)) return z.astype(np.float32)
def invert(self, image, num_viz=0): 'Inverts the given image to a latent code.\n\n Basically, this function is based on gradient descent algorithm.\n\n Args:\n image: Target image to invert, which is assumed to have already been\n preprocessed.\n num_viz: Number of intermediate outputs to visualize. (default: 0)\n\n Returns:\n A two-element tuple. First one is the inverted code. Second one is a list\n of intermediate results, where first image is the input image, second\n one is the reconstructed result from the initial latent code, remainings\n are from the optimization process every `self.iteration // num_viz`\n steps.\n ' x = image[np.newaxis] x = self.G.to_tensor(x.astype(np.float32)) x.requires_grad = False init_z = self.get_init_code(image) z = torch.Tensor(init_z).to(self.run_device) z.requires_grad = True optimizer = torch.optim.Adam([z], lr=self.learning_rate) viz_results = [] viz_results.append(self.G.postprocess(_get_tensor_value(x))[0]) x_init_inv = self.G.net.synthesis(z) viz_results.append(self.G.postprocess(_get_tensor_value(x_init_inv))[0]) pbar = tqdm(range(1, (self.iteration + 1)), leave=True) for step in pbar: loss = 0.0 x_rec = self.G.net.synthesis(z) loss_pix = torch.mean(((x - x_rec) ** 2)) loss = (loss + (loss_pix * self.loss_pix_weight)) log_message = f'loss_pix: {_get_tensor_value(loss_pix):.3f}' if self.loss_feat_weight: x_feat = self.F.net(x) x_rec_feat = self.F.net(x_rec) loss_feat = torch.mean(((x_feat - x_rec_feat) ** 2)) loss = (loss + (loss_feat * self.loss_feat_weight)) log_message += f', loss_feat: {_get_tensor_value(loss_feat):.3f}' if self.loss_reg_weight: z_rec = self.E.net(x_rec).view(1, *self.encode_dim) loss_reg = torch.mean(((z - z_rec) ** 2)) loss = (loss + (loss_reg * self.loss_reg_weight)) log_message += f', loss_reg: {_get_tensor_value(loss_reg):.3f}' log_message += f', loss: {_get_tensor_value(loss):.3f}' pbar.set_description_str(log_message) if self.logger: self.logger.debug(f'Step: {step:05d}, lr: {self.learning_rate:.2e}, {log_message}') optimizer.zero_grad() loss.backward() optimizer.step() if ((num_viz > 0) and ((step % (self.iteration // num_viz)) == 0)): viz_results.append(self.G.postprocess(_get_tensor_value(x_rec))[0]) return (_get_tensor_value(z), viz_results)
-5,647,097,221,206,265,000
Inverts the given image to a latent code. Basically, this function is based on the gradient descent algorithm. Args: image: Target image to invert, which is assumed to have already been preprocessed. num_viz: Number of intermediate outputs to visualize. (default: 0) Returns: A two-element tuple. First one is the inverted code. Second one is a list of intermediate results, where the first image is the input image, the second one is the reconstructed result from the initial latent code, and the remaining ones are from the optimization process every `self.iteration // num_viz` steps.
utils/inverter.py
invert
Twizwei/idinvert_pytorch
python
def invert(self, image, num_viz=0): 'Inverts the given image to a latent code.\n\n Basically, this function is based on gradient descent algorithm.\n\n Args:\n image: Target image to invert, which is assumed to have already been\n preprocessed.\n num_viz: Number of intermediate outputs to visualize. (default: 0)\n\n Returns:\n A two-element tuple. First one is the inverted code. Second one is a list\n of intermediate results, where first image is the input image, second\n one is the reconstructed result from the initial latent code, remainings\n are from the optimization process every `self.iteration // num_viz`\n steps.\n ' x = image[np.newaxis] x = self.G.to_tensor(x.astype(np.float32)) x.requires_grad = False init_z = self.get_init_code(image) z = torch.Tensor(init_z).to(self.run_device) z.requires_grad = True optimizer = torch.optim.Adam([z], lr=self.learning_rate) viz_results = [] viz_results.append(self.G.postprocess(_get_tensor_value(x))[0]) x_init_inv = self.G.net.synthesis(z) viz_results.append(self.G.postprocess(_get_tensor_value(x_init_inv))[0]) pbar = tqdm(range(1, (self.iteration + 1)), leave=True) for step in pbar: loss = 0.0 x_rec = self.G.net.synthesis(z) loss_pix = torch.mean(((x - x_rec) ** 2)) loss = (loss + (loss_pix * self.loss_pix_weight)) log_message = f'loss_pix: {_get_tensor_value(loss_pix):.3f}' if self.loss_feat_weight: x_feat = self.F.net(x) x_rec_feat = self.F.net(x_rec) loss_feat = torch.mean(((x_feat - x_rec_feat) ** 2)) loss = (loss + (loss_feat * self.loss_feat_weight)) log_message += f', loss_feat: {_get_tensor_value(loss_feat):.3f}' if self.loss_reg_weight: z_rec = self.E.net(x_rec).view(1, *self.encode_dim) loss_reg = torch.mean(((z - z_rec) ** 2)) loss = (loss + (loss_reg * self.loss_reg_weight)) log_message += f', loss_reg: {_get_tensor_value(loss_reg):.3f}' log_message += f', loss: {_get_tensor_value(loss):.3f}' pbar.set_description_str(log_message) if self.logger: self.logger.debug(f'Step: {step:05d}, lr: {self.learning_rate:.2e}, {log_message}') optimizer.zero_grad() loss.backward() optimizer.step() if ((num_viz > 0) and ((step % (self.iteration // num_viz)) == 0)): viz_results.append(self.G.postprocess(_get_tensor_value(x_rec))[0]) return (_get_tensor_value(z), viz_results)
def easy_invert(self, image, num_viz=0): 'Wraps functions `preprocess()` and `invert()` together.' return self.invert(self.preprocess(image), num_viz)
8,442,911,914,438,245,000
Wraps functions `preprocess()` and `invert()` together.
utils/inverter.py
easy_invert
Twizwei/idinvert_pytorch
python
def easy_invert(self, image, num_viz=0): return self.invert(self.preprocess(image), num_viz)
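A usage sketch for `easy_invert()`. It assumes an `inverter` built as in the constructor sketch earlier and a local file 'face.png'; the BGR-to-RGB conversion reflects the fact that `preprocess()` expects an RGB uint8 array while cv2 loads BGR.

```python
# Hypothetical usage sketch; 'face.png' and `inverter` are assumed to exist.
import cv2

image = cv2.imread('face.png')                   # uint8, HxWxC, BGR
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)   # preprocess() expects RGB

# Returns the inverted code plus intermediate visualizations: the input image,
# the reconstruction from the initial code, then one snapshot every
# iteration // num_viz steps.
latent_code, viz_results = inverter.easy_invert(image, num_viz=5)
print(latent_code.shape)   # (1, num_layers, w_space_dim)
```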
def diffuse(self, target, context, center_x, center_y, crop_x, crop_y, num_viz=0): 'Diffuses the target image to a context image.\n\n Basically, this function is a motified version of `self.invert()`. More\n concretely, the encoder regularizer is removed from the objectives and the\n reconstruction loss is computed from the masked region.\n\n Args:\n target: Target image (foreground).\n context: Context image (background).\n center_x: The x-coordinate of the crop center.\n center_y: The y-coordinate of the crop center.\n crop_x: The crop size along the x-axis.\n crop_y: The crop size along the y-axis.\n num_viz: Number of intermediate outputs to visualize. (default: 0)\n\n Returns:\n A two-element tuple. First one is the inverted code. Second one is a list\n of intermediate results, where first image is the direct copy-paste\n image, second one is the reconstructed result from the initial latent\n code, remainings are from the optimization process every\n `self.iteration // num_viz` steps.\n ' image_shape = (self.G.image_channels, self.G.resolution, self.G.resolution) mask = np.zeros((1, *image_shape), dtype=np.float32) xx = (center_x - (crop_x // 2)) yy = (center_y - (crop_y // 2)) mask[:, :, yy:(yy + crop_y), xx:(xx + crop_x)] = 1.0 target = target[np.newaxis] context = context[np.newaxis] x = ((target * mask) + (context * (1 - mask))) x = self.G.to_tensor(x.astype(np.float32)) x.requires_grad = False mask = self.G.to_tensor(mask.astype(np.float32)) mask.requires_grad = False init_z = _get_tensor_value(self.E.net(x).view(1, *self.encode_dim)) init_z = init_z.astype(np.float32) z = torch.Tensor(init_z).to(self.run_device) z.requires_grad = True optimizer = torch.optim.Adam([z], lr=self.learning_rate) viz_results = [] viz_results.append(self.G.postprocess(_get_tensor_value(x))[0]) x_init_inv = self.G.net.synthesis(z) viz_results.append(self.G.postprocess(_get_tensor_value(x_init_inv))[0]) pbar = tqdm(range(1, (self.iteration + 1)), leave=True) for step in pbar: loss = 0.0 x_rec = self.G.net.synthesis(z) loss_pix = torch.mean((((x - x_rec) * mask) ** 2)) loss = (loss + (loss_pix * self.loss_pix_weight)) log_message = f'loss_pix: {_get_tensor_value(loss_pix):.3f}' if self.loss_feat_weight: x_feat = self.F.net((x * mask)) x_rec_feat = self.F.net((x_rec * mask)) loss_feat = torch.mean(((x_feat - x_rec_feat) ** 2)) loss = (loss + (loss_feat * self.loss_feat_weight)) log_message += f', loss_feat: {_get_tensor_value(loss_feat):.3f}' log_message += f', loss: {_get_tensor_value(loss):.3f}' pbar.set_description_str(log_message) if self.logger: self.logger.debug(f'Step: {step:05d}, lr: {self.learning_rate:.2e}, {log_message}') optimizer.zero_grad() loss.backward() optimizer.step() if ((num_viz > 0) and ((step % (self.iteration // num_viz)) == 0)): viz_results.append(self.G.postprocess(_get_tensor_value(x_rec))[0]) return (_get_tensor_value(z), viz_results)
-7,616,106,853,401,418,000
Diffuses the target image to a context image. Basically, this function is a modified version of `self.invert()`. More concretely, the encoder regularizer is removed from the objectives and the reconstruction loss is computed from the masked region. Args: target: Target image (foreground). context: Context image (background). center_x: The x-coordinate of the crop center. center_y: The y-coordinate of the crop center. crop_x: The crop size along the x-axis. crop_y: The crop size along the y-axis. num_viz: Number of intermediate outputs to visualize. (default: 0) Returns: A two-element tuple. First one is the inverted code. Second one is a list of intermediate results, where the first image is the direct copy-paste image, the second one is the reconstructed result from the initial latent code, and the remaining ones are from the optimization process every `self.iteration // num_viz` steps.
utils/inverter.py
diffuse
Twizwei/idinvert_pytorch
python
def diffuse(self, target, context, center_x, center_y, crop_x, crop_y, num_viz=0): 'Diffuses the target image to a context image.\n\n Basically, this function is a motified version of `self.invert()`. More\n concretely, the encoder regularizer is removed from the objectives and the\n reconstruction loss is computed from the masked region.\n\n Args:\n target: Target image (foreground).\n context: Context image (background).\n center_x: The x-coordinate of the crop center.\n center_y: The y-coordinate of the crop center.\n crop_x: The crop size along the x-axis.\n crop_y: The crop size along the y-axis.\n num_viz: Number of intermediate outputs to visualize. (default: 0)\n\n Returns:\n A two-element tuple. First one is the inverted code. Second one is a list\n of intermediate results, where first image is the direct copy-paste\n image, second one is the reconstructed result from the initial latent\n code, remainings are from the optimization process every\n `self.iteration // num_viz` steps.\n ' image_shape = (self.G.image_channels, self.G.resolution, self.G.resolution) mask = np.zeros((1, *image_shape), dtype=np.float32) xx = (center_x - (crop_x // 2)) yy = (center_y - (crop_y // 2)) mask[:, :, yy:(yy + crop_y), xx:(xx + crop_x)] = 1.0 target = target[np.newaxis] context = context[np.newaxis] x = ((target * mask) + (context * (1 - mask))) x = self.G.to_tensor(x.astype(np.float32)) x.requires_grad = False mask = self.G.to_tensor(mask.astype(np.float32)) mask.requires_grad = False init_z = _get_tensor_value(self.E.net(x).view(1, *self.encode_dim)) init_z = init_z.astype(np.float32) z = torch.Tensor(init_z).to(self.run_device) z.requires_grad = True optimizer = torch.optim.Adam([z], lr=self.learning_rate) viz_results = [] viz_results.append(self.G.postprocess(_get_tensor_value(x))[0]) x_init_inv = self.G.net.synthesis(z) viz_results.append(self.G.postprocess(_get_tensor_value(x_init_inv))[0]) pbar = tqdm(range(1, (self.iteration + 1)), leave=True) for step in pbar: loss = 0.0 x_rec = self.G.net.synthesis(z) loss_pix = torch.mean((((x - x_rec) * mask) ** 2)) loss = (loss + (loss_pix * self.loss_pix_weight)) log_message = f'loss_pix: {_get_tensor_value(loss_pix):.3f}' if self.loss_feat_weight: x_feat = self.F.net((x * mask)) x_rec_feat = self.F.net((x_rec * mask)) loss_feat = torch.mean(((x_feat - x_rec_feat) ** 2)) loss = (loss + (loss_feat * self.loss_feat_weight)) log_message += f', loss_feat: {_get_tensor_value(loss_feat):.3f}' log_message += f', loss: {_get_tensor_value(loss):.3f}' pbar.set_description_str(log_message) if self.logger: self.logger.debug(f'Step: {step:05d}, lr: {self.learning_rate:.2e}, {log_message}') optimizer.zero_grad() loss.backward() optimizer.step() if ((num_viz > 0) and ((step % (self.iteration // num_viz)) == 0)): viz_results.append(self.G.postprocess(_get_tensor_value(x_rec))[0]) return (_get_tensor_value(z), viz_results)
def easy_diffuse(self, target, context, *args, **kwargs): 'Wraps functions `preprocess()` and `diffuse()` together.' return self.diffuse(self.preprocess(target), self.preprocess(context), *args, **kwargs)
-8,924,164,457,142,377,000
Wraps functions `preprocess()` and `diffuse()` together.
utils/inverter.py
easy_diffuse
Twizwei/idinvert_pytorch
python
def easy_diffuse(self, target, context, *args, **kwargs): return self.diffuse(self.preprocess(target), self.preprocess(context), *args, **kwargs)
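A usage sketch for `easy_diffuse()`. The file names and crop values are made up for illustration, and `inverter` is assumed to have been constructed as in the earlier sketch.

```python
# Hypothetical usage sketch; file names and crop coordinates are placeholders.
import cv2

target = cv2.cvtColor(cv2.imread('foreground.png'), cv2.COLOR_BGR2RGB)
context = cv2.cvtColor(cv2.imread('background.png'), cv2.COLOR_BGR2RGB)

# Paste a crop_x * crop_y patch of `target`, centered at (center_x, center_y),
# into `context`, then optimize the latent code against the masked region only.
code, viz_results = inverter.easy_diffuse(
    target, context,
    center_x=128, center_y=128,
    crop_x=64, crop_y=64,
    num_viz=5,
)
```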
async def async_start(hass: HomeAssistantType, discovery_topic, hass_config, config_entry=None) -> bool: 'Initialize of MQTT Discovery.' async def async_device_message_received(topic, payload, qos): 'Process the received message.' match = TOPIC_MATCHER.match(topic) if (not match): return (_prefix_topic, component, node_id, object_id) = match.groups() if (component not in SUPPORTED_COMPONENTS): _LOGGER.warning('Component %s is not supported', component) return if payload: try: payload = json.loads(payload) except ValueError: _LOGGER.warning("Unable to parse JSON %s: '%s'", object_id, payload) return payload = dict(payload) for key in list(payload.keys()): abbreviated_key = key key = ABBREVIATIONS.get(key, key) payload[key] = payload.pop(abbreviated_key) if (TOPIC_BASE in payload): base = payload[TOPIC_BASE] for (key, value) in payload.items(): if (isinstance(value, str) and value): if ((value[0] == TOPIC_BASE) and key.endswith('_topic')): payload[key] = '{}{}'.format(base, value[1:]) if ((value[(- 1)] == TOPIC_BASE) and key.endswith('_topic')): payload[key] = '{}{}'.format(value[:(- 1)], base) discovery_id = (' '.join((node_id, object_id)) if node_id else object_id) discovery_hash = (component, discovery_id) if payload: if ((CONF_PLATFORM in payload) and ('schema' not in payload)): platform = payload[CONF_PLATFORM] if ((component in DEPRECATED_PLATFORM_TO_SCHEMA) and (platform in DEPRECATED_PLATFORM_TO_SCHEMA[component])): schema = DEPRECATED_PLATFORM_TO_SCHEMA[component][platform] payload['schema'] = schema _LOGGER.warning('"platform": "%s" is deprecated, replace with "schema":"%s"', platform, schema) payload[CONF_PLATFORM] = 'mqtt' if (CONF_STATE_TOPIC not in payload): payload[CONF_STATE_TOPIC] = '{}/{}/{}{}/state'.format(discovery_topic, component, (('%s/' % node_id) if node_id else ''), object_id) payload[ATTR_DISCOVERY_HASH] = discovery_hash if (ALREADY_DISCOVERED not in hass.data): hass.data[ALREADY_DISCOVERED] = {} if (discovery_hash in hass.data[ALREADY_DISCOVERED]): _LOGGER.info('Component has already been discovered: %s %s, sending update', component, discovery_id) async_dispatcher_send(hass, MQTT_DISCOVERY_UPDATED.format(discovery_hash), payload) elif payload: _LOGGER.info('Found new component: %s %s', component, discovery_id) hass.data[ALREADY_DISCOVERED][discovery_hash] = None if (component not in CONFIG_ENTRY_COMPONENTS): (await async_load_platform(hass, component, 'mqtt', payload, hass_config)) return config_entries_key = '{}.{}'.format(component, 'mqtt') async with hass.data[DATA_CONFIG_ENTRY_LOCK]: if (config_entries_key not in hass.data[CONFIG_ENTRY_IS_SETUP]): (await hass.config_entries.async_forward_entry_setup(config_entry, component)) hass.data[CONFIG_ENTRY_IS_SETUP].add(config_entries_key) async_dispatcher_send(hass, MQTT_DISCOVERY_NEW.format(component, 'mqtt'), payload) hass.data[DATA_CONFIG_ENTRY_LOCK] = asyncio.Lock() hass.data[CONFIG_ENTRY_IS_SETUP] = set() (await mqtt.async_subscribe(hass, (discovery_topic + '/#'), async_device_message_received, 0)) return True
2,637,539,386,003,770,000
Initialize of MQTT Discovery.
homeassistant/components/mqtt/discovery.py
async_start
arnisoph/home-assistant
python
async def async_start(hass: HomeAssistantType, discovery_topic, hass_config, config_entry=None) -> bool: async def async_device_message_received(topic, payload, qos): 'Process the received message.' match = TOPIC_MATCHER.match(topic) if (not match): return (_prefix_topic, component, node_id, object_id) = match.groups() if (component not in SUPPORTED_COMPONENTS): _LOGGER.warning('Component %s is not supported', component) return if payload: try: payload = json.loads(payload) except ValueError: _LOGGER.warning("Unable to parse JSON %s: '%s'", object_id, payload) return payload = dict(payload) for key in list(payload.keys()): abbreviated_key = key key = ABBREVIATIONS.get(key, key) payload[key] = payload.pop(abbreviated_key) if (TOPIC_BASE in payload): base = payload[TOPIC_BASE] for (key, value) in payload.items(): if (isinstance(value, str) and value): if ((value[0] == TOPIC_BASE) and key.endswith('_topic')): payload[key] = '{}{}'.format(base, value[1:]) if ((value[(- 1)] == TOPIC_BASE) and key.endswith('_topic')): payload[key] = '{}{}'.format(value[:(- 1)], base) discovery_id = (' '.join((node_id, object_id)) if node_id else object_id) discovery_hash = (component, discovery_id) if payload: if ((CONF_PLATFORM in payload) and ('schema' not in payload)): platform = payload[CONF_PLATFORM] if ((component in DEPRECATED_PLATFORM_TO_SCHEMA) and (platform in DEPRECATED_PLATFORM_TO_SCHEMA[component])): schema = DEPRECATED_PLATFORM_TO_SCHEMA[component][platform] payload['schema'] = schema _LOGGER.warning('"platform": "%s" is deprecated, replace with "schema":"%s"', platform, schema) payload[CONF_PLATFORM] = 'mqtt' if (CONF_STATE_TOPIC not in payload): payload[CONF_STATE_TOPIC] = '{}/{}/{}{}/state'.format(discovery_topic, component, (('%s/' % node_id) if node_id else ''), object_id) payload[ATTR_DISCOVERY_HASH] = discovery_hash if (ALREADY_DISCOVERED not in hass.data): hass.data[ALREADY_DISCOVERED] = {} if (discovery_hash in hass.data[ALREADY_DISCOVERED]): _LOGGER.info('Component has already been discovered: %s %s, sending update', component, discovery_id) async_dispatcher_send(hass, MQTT_DISCOVERY_UPDATED.format(discovery_hash), payload) elif payload: _LOGGER.info('Found new component: %s %s', component, discovery_id) hass.data[ALREADY_DISCOVERED][discovery_hash] = None if (component not in CONFIG_ENTRY_COMPONENTS): (await async_load_platform(hass, component, 'mqtt', payload, hass_config)) return config_entries_key = '{}.{}'.format(component, 'mqtt') async with hass.data[DATA_CONFIG_ENTRY_LOCK]: if (config_entries_key not in hass.data[CONFIG_ENTRY_IS_SETUP]): (await hass.config_entries.async_forward_entry_setup(config_entry, component)) hass.data[CONFIG_ENTRY_IS_SETUP].add(config_entries_key) async_dispatcher_send(hass, MQTT_DISCOVERY_NEW.format(component, 'mqtt'), payload) hass.data[DATA_CONFIG_ENTRY_LOCK] = asyncio.Lock() hass.data[CONFIG_ENTRY_IS_SETUP] = set() (await mqtt.async_subscribe(hass, (discovery_topic + '/#'), async_device_message_received, 0)) return True
async def async_device_message_received(topic, payload, qos): 'Process the received message.' match = TOPIC_MATCHER.match(topic) if (not match): return (_prefix_topic, component, node_id, object_id) = match.groups() if (component not in SUPPORTED_COMPONENTS): _LOGGER.warning('Component %s is not supported', component) return if payload: try: payload = json.loads(payload) except ValueError: _LOGGER.warning("Unable to parse JSON %s: '%s'", object_id, payload) return payload = dict(payload) for key in list(payload.keys()): abbreviated_key = key key = ABBREVIATIONS.get(key, key) payload[key] = payload.pop(abbreviated_key) if (TOPIC_BASE in payload): base = payload[TOPIC_BASE] for (key, value) in payload.items(): if (isinstance(value, str) and value): if ((value[0] == TOPIC_BASE) and key.endswith('_topic')): payload[key] = '{}{}'.format(base, value[1:]) if ((value[(- 1)] == TOPIC_BASE) and key.endswith('_topic')): payload[key] = '{}{}'.format(value[:(- 1)], base) discovery_id = (' '.join((node_id, object_id)) if node_id else object_id) discovery_hash = (component, discovery_id) if payload: if ((CONF_PLATFORM in payload) and ('schema' not in payload)): platform = payload[CONF_PLATFORM] if ((component in DEPRECATED_PLATFORM_TO_SCHEMA) and (platform in DEPRECATED_PLATFORM_TO_SCHEMA[component])): schema = DEPRECATED_PLATFORM_TO_SCHEMA[component][platform] payload['schema'] = schema _LOGGER.warning('"platform": "%s" is deprecated, replace with "schema":"%s"', platform, schema) payload[CONF_PLATFORM] = 'mqtt' if (CONF_STATE_TOPIC not in payload): payload[CONF_STATE_TOPIC] = '{}/{}/{}{}/state'.format(discovery_topic, component, (('%s/' % node_id) if node_id else ''), object_id) payload[ATTR_DISCOVERY_HASH] = discovery_hash if (ALREADY_DISCOVERED not in hass.data): hass.data[ALREADY_DISCOVERED] = {} if (discovery_hash in hass.data[ALREADY_DISCOVERED]): _LOGGER.info('Component has already been discovered: %s %s, sending update', component, discovery_id) async_dispatcher_send(hass, MQTT_DISCOVERY_UPDATED.format(discovery_hash), payload) elif payload: _LOGGER.info('Found new component: %s %s', component, discovery_id) hass.data[ALREADY_DISCOVERED][discovery_hash] = None if (component not in CONFIG_ENTRY_COMPONENTS): (await async_load_platform(hass, component, 'mqtt', payload, hass_config)) return config_entries_key = '{}.{}'.format(component, 'mqtt') async with hass.data[DATA_CONFIG_ENTRY_LOCK]: if (config_entries_key not in hass.data[CONFIG_ENTRY_IS_SETUP]): (await hass.config_entries.async_forward_entry_setup(config_entry, component)) hass.data[CONFIG_ENTRY_IS_SETUP].add(config_entries_key) async_dispatcher_send(hass, MQTT_DISCOVERY_NEW.format(component, 'mqtt'), payload)
-2,159,514,262,508,901,400
Process the received message.
homeassistant/components/mqtt/discovery.py
async_device_message_received
arnisoph/home-assistant
python
async def async_device_message_received(topic, payload, qos): match = TOPIC_MATCHER.match(topic) if (not match): return (_prefix_topic, component, node_id, object_id) = match.groups() if (component not in SUPPORTED_COMPONENTS): _LOGGER.warning('Component %s is not supported', component) return if payload: try: payload = json.loads(payload) except ValueError: _LOGGER.warning("Unable to parse JSON %s: '%s'", object_id, payload) return payload = dict(payload) for key in list(payload.keys()): abbreviated_key = key key = ABBREVIATIONS.get(key, key) payload[key] = payload.pop(abbreviated_key) if (TOPIC_BASE in payload): base = payload[TOPIC_BASE] for (key, value) in payload.items(): if (isinstance(value, str) and value): if ((value[0] == TOPIC_BASE) and key.endswith('_topic')): payload[key] = '{}{}'.format(base, value[1:]) if ((value[(- 1)] == TOPIC_BASE) and key.endswith('_topic')): payload[key] = '{}{}'.format(value[:(- 1)], base) discovery_id = (' '.join((node_id, object_id)) if node_id else object_id) discovery_hash = (component, discovery_id) if payload: if ((CONF_PLATFORM in payload) and ('schema' not in payload)): platform = payload[CONF_PLATFORM] if ((component in DEPRECATED_PLATFORM_TO_SCHEMA) and (platform in DEPRECATED_PLATFORM_TO_SCHEMA[component])): schema = DEPRECATED_PLATFORM_TO_SCHEMA[component][platform] payload['schema'] = schema _LOGGER.warning('"platform": "%s" is deprecated, replace with "schema":"%s"', platform, schema) payload[CONF_PLATFORM] = 'mqtt' if (CONF_STATE_TOPIC not in payload): payload[CONF_STATE_TOPIC] = '{}/{}/{}{}/state'.format(discovery_topic, component, (('%s/' % node_id) if node_id else ''), object_id) payload[ATTR_DISCOVERY_HASH] = discovery_hash if (ALREADY_DISCOVERED not in hass.data): hass.data[ALREADY_DISCOVERED] = {} if (discovery_hash in hass.data[ALREADY_DISCOVERED]): _LOGGER.info('Component has already been discovered: %s %s, sending update', component, discovery_id) async_dispatcher_send(hass, MQTT_DISCOVERY_UPDATED.format(discovery_hash), payload) elif payload: _LOGGER.info('Found new component: %s %s', component, discovery_id) hass.data[ALREADY_DISCOVERED][discovery_hash] = None if (component not in CONFIG_ENTRY_COMPONENTS): (await async_load_platform(hass, component, 'mqtt', payload, hass_config)) return config_entries_key = '{}.{}'.format(component, 'mqtt') async with hass.data[DATA_CONFIG_ENTRY_LOCK]: if (config_entries_key not in hass.data[CONFIG_ENTRY_IS_SETUP]): (await hass.config_entries.async_forward_entry_setup(config_entry, component)) hass.data[CONFIG_ENTRY_IS_SETUP].add(config_entries_key) async_dispatcher_send(hass, MQTT_DISCOVERY_NEW.format(component, 'mqtt'), payload)
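The abbreviation and topic-base handling inside async_device_message_received can be illustrated in isolation. The sketch below mirrors the expansion logic shown above; the '~' value for TOPIC_BASE and the ABBREVIATIONS entries are assumptions for illustration, since those constants are defined elsewhere in the component.

```python
# Standalone sketch of the payload-expansion logic shown above.
# TOPIC_BASE = '~' and the ABBREVIATIONS entries are illustrative assumptions.
TOPIC_BASE = '~'
ABBREVIATIONS = {'stat_t': 'state_topic', 'cmd_t': 'command_topic', 'name': 'name'}

def expand_discovery_payload(payload):
    # Replace abbreviated keys with their long form.
    payload = dict(payload)
    for key in list(payload.keys()):
        payload[ABBREVIATIONS.get(key, key)] = payload.pop(key)
    # Expand a leading or trailing base marker in *_topic values.
    if TOPIC_BASE in payload:
        base = payload[TOPIC_BASE]
        for key, value in payload.items():
            if isinstance(value, str) and value and key.endswith('_topic'):
                if value[0] == TOPIC_BASE:
                    payload[key] = base + value[1:]
                if value[-1] == TOPIC_BASE:
                    payload[key] = value[:-1] + base
    return payload

print(expand_discovery_payload(
    {'~': 'homeassistant/switch/irrigation', 'name': 'garden', 'stat_t': '~/state'}))
# state_topic becomes 'homeassistant/switch/irrigation/state'
```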
def __init__(self, inplanes, planes, scales=4, base_width=26, base_channels=64, stage_type='normal', **kwargs): 'Bottle2neck block for Res2Net.\n\n If style is "pytorch", the stride-two layer is the 3x3 conv layer, if\n it is "caffe", the stride-two layer is the first 1x1 conv layer.\n ' super(Bottle2neck, self).__init__(inplanes, planes, **kwargs) assert (scales > 1), 'Res2Net degenerates to ResNet when scales = 1.' width = int(math.floor((self.planes * (base_width / base_channels)))) (self.norm1_name, norm1) = build_norm_layer(self.norm_cfg, (width * scales), postfix=1) (self.norm3_name, norm3) = build_norm_layer(self.norm_cfg, (self.planes * self.expansion), postfix=3) self.conv1 = build_conv_layer(self.conv_cfg, self.inplanes, (width * scales), kernel_size=1, stride=self.conv1_stride, bias=False) self.add_module(self.norm1_name, norm1) if ((stage_type == 'stage') and (self.conv2_stride != 1)): self.pool = nn.AvgPool2d(kernel_size=3, stride=self.conv2_stride, padding=1) convs = [] bns = [] fallback_on_stride = False if self.with_dcn: fallback_on_stride = self.dcn.pop('fallback_on_stride', False) if ((not self.with_dcn) or fallback_on_stride): for i in range((scales - 1)): convs.append(build_conv_layer(self.conv_cfg, width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, bias=False)) bns.append(build_norm_layer(self.norm_cfg, width, postfix=(i + 1))[1]) self.convs = nn.ModuleList(convs) self.bns = nn.ModuleList(bns) else: assert (self.conv_cfg is None), 'conv_cfg must be None for DCN' for i in range((scales - 1)): convs.append(build_conv_layer(self.dcn, width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, bias=False)) bns.append(build_norm_layer(self.norm_cfg, width, postfix=(i + 1))[1]) self.convs = nn.ModuleList(convs) self.bns = nn.ModuleList(bns) self.conv3 = build_conv_layer(self.conv_cfg, (width * scales), (self.planes * self.expansion), kernel_size=1, bias=False) self.add_module(self.norm3_name, norm3) self.stage_type = stage_type self.scales = scales self.width = width delattr(self, 'conv2') delattr(self, self.norm2_name)
4,797,434,700,482,818,000
Bottle2neck block for Res2Net. If style is "pytorch", the stride-two layer is the 3x3 conv layer, if it is "caffe", the stride-two layer is the first 1x1 conv layer.
detection/scrfd/mmdet/models/backbones/res2net.py
__init__
007gzs/insightface
python
def __init__(self, inplanes, planes, scales=4, base_width=26, base_channels=64, stage_type='normal', **kwargs): 'Bottle2neck block for Res2Net.\n\n If style is "pytorch", the stride-two layer is the 3x3 conv layer, if\n it is "caffe", the stride-two layer is the first 1x1 conv layer.\n ' super(Bottle2neck, self).__init__(inplanes, planes, **kwargs) assert (scales > 1), 'Res2Net degenerates to ResNet when scales = 1.' width = int(math.floor((self.planes * (base_width / base_channels)))) (self.norm1_name, norm1) = build_norm_layer(self.norm_cfg, (width * scales), postfix=1) (self.norm3_name, norm3) = build_norm_layer(self.norm_cfg, (self.planes * self.expansion), postfix=3) self.conv1 = build_conv_layer(self.conv_cfg, self.inplanes, (width * scales), kernel_size=1, stride=self.conv1_stride, bias=False) self.add_module(self.norm1_name, norm1) if ((stage_type == 'stage') and (self.conv2_stride != 1)): self.pool = nn.AvgPool2d(kernel_size=3, stride=self.conv2_stride, padding=1) convs = [] bns = [] fallback_on_stride = False if self.with_dcn: fallback_on_stride = self.dcn.pop('fallback_on_stride', False) if ((not self.with_dcn) or fallback_on_stride): for i in range((scales - 1)): convs.append(build_conv_layer(self.conv_cfg, width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, bias=False)) bns.append(build_norm_layer(self.norm_cfg, width, postfix=(i + 1))[1]) self.convs = nn.ModuleList(convs) self.bns = nn.ModuleList(bns) else: assert (self.conv_cfg is None), 'conv_cfg must be None for DCN' for i in range((scales - 1)): convs.append(build_conv_layer(self.dcn, width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, bias=False)) bns.append(build_norm_layer(self.norm_cfg, width, postfix=(i + 1))[1]) self.convs = nn.ModuleList(convs) self.bns = nn.ModuleList(bns) self.conv3 = build_conv_layer(self.conv_cfg, (width * scales), (self.planes * self.expansion), kernel_size=1, bias=False) self.add_module(self.norm3_name, norm3) self.stage_type = stage_type self.scales = scales self.width = width delattr(self, 'conv2') delattr(self, self.norm2_name)
def forward(self, x): 'Forward function.' def _inner_forward(x): identity = x out = self.conv1(x) out = self.norm1(out) out = self.relu(out) if self.with_plugins: out = self.forward_plugin(out, self.after_conv1_plugin_names) spx = torch.split(out, self.width, 1) sp = self.convs[0](spx[0].contiguous()) sp = self.relu(self.bns[0](sp)) out = sp for i in range(1, (self.scales - 1)): if (self.stage_type == 'stage'): sp = spx[i] else: sp = (sp + spx[i]) sp = self.convs[i](sp.contiguous()) sp = self.relu(self.bns[i](sp)) out = torch.cat((out, sp), 1) if ((self.stage_type == 'normal') or (self.conv2_stride == 1)): out = torch.cat((out, spx[(self.scales - 1)]), 1) elif (self.stage_type == 'stage'): out = torch.cat((out, self.pool(spx[(self.scales - 1)])), 1) if self.with_plugins: out = self.forward_plugin(out, self.after_conv2_plugin_names) out = self.conv3(out) out = self.norm3(out) if self.with_plugins: out = self.forward_plugin(out, self.after_conv3_plugin_names) if (self.downsample is not None): identity = self.downsample(x) out += identity return out if (self.with_cp and x.requires_grad): out = cp.checkpoint(_inner_forward, x) else: out = _inner_forward(x) out = self.relu(out) return out
1,846,693,992,825,146,000
Forward function.
detection/scrfd/mmdet/models/backbones/res2net.py
forward
007gzs/insightface
python
def forward(self, x): def _inner_forward(x): identity = x out = self.conv1(x) out = self.norm1(out) out = self.relu(out) if self.with_plugins: out = self.forward_plugin(out, self.after_conv1_plugin_names) spx = torch.split(out, self.width, 1) sp = self.convs[0](spx[0].contiguous()) sp = self.relu(self.bns[0](sp)) out = sp for i in range(1, (self.scales - 1)): if (self.stage_type == 'stage'): sp = spx[i] else: sp = (sp + spx[i]) sp = self.convs[i](sp.contiguous()) sp = self.relu(self.bns[i](sp)) out = torch.cat((out, sp), 1) if ((self.stage_type == 'normal') or (self.conv2_stride == 1)): out = torch.cat((out, spx[(self.scales - 1)]), 1) elif (self.stage_type == 'stage'): out = torch.cat((out, self.pool(spx[(self.scales - 1)])), 1) if self.with_plugins: out = self.forward_plugin(out, self.after_conv2_plugin_names) out = self.conv3(out) out = self.norm3(out) if self.with_plugins: out = self.forward_plugin(out, self.after_conv3_plugin_names) if (self.downsample is not None): identity = self.downsample(x) out += identity return out if (self.with_cp and x.requires_grad): out = cp.checkpoint(_inner_forward, x) else: out = _inner_forward(x) out = self.relu(out) return out
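The split-transform-concatenate pattern in the forward pass above can be shown with plain tensors. This is a simplified sketch of the 'normal' stage type at stride 1 using ordinary Conv2d layers; it is not the mmdet module itself (no DCN, plugins, norm layers, or checkpointing).

```python
# Simplified sketch of the Res2Net hierarchical residual pattern used in
# Bottle2neck.forward() above ('normal' stage type, stride 1).
import torch
import torch.nn as nn

scales, width = 4, 26
convs = nn.ModuleList(
    nn.Conv2d(width, width, kernel_size=3, padding=1, bias=False)
    for _ in range(scales - 1)
)

x = torch.randn(1, width * scales, 56, 56)      # output of the first 1x1 conv
spx = torch.split(x, width, dim=1)              # split channels into `scales` groups

sp = torch.relu(convs[0](spx[0]))
out = sp
for i in range(1, scales - 1):
    sp = sp + spx[i]                            # feed previous output into the next split
    sp = torch.relu(convs[i](sp))
    out = torch.cat((out, sp), dim=1)

out = torch.cat((out, spx[scales - 1]), dim=1)  # last split passes through untouched
print(out.shape)                                # torch.Size([1, 104, 56, 56])
```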
def init_weights(self, pretrained=None): 'Initialize the weights in backbone.\n\n Args:\n pretrained (str, optional): Path to pre-trained weights.\n Defaults to None.\n ' if isinstance(pretrained, str): logger = get_root_logger() load_checkpoint(self, pretrained, strict=False, logger=logger) elif (pretrained is None): for m in self.modules(): if isinstance(m, nn.Conv2d): kaiming_init(m) elif isinstance(m, (_BatchNorm, nn.GroupNorm)): constant_init(m, 1) if (self.dcn is not None): for m in self.modules(): if isinstance(m, Bottle2neck): for n in m.convs: if hasattr(n, 'conv_offset'): constant_init(n.conv_offset, 0) if self.zero_init_residual: for m in self.modules(): if isinstance(m, Bottle2neck): constant_init(m.norm3, 0) else: raise TypeError('pretrained must be a str or None')
-399,503,817,821,927,300
Initialize the weights in backbone. Args: pretrained (str, optional): Path to pre-trained weights. Defaults to None.
detection/scrfd/mmdet/models/backbones/res2net.py
init_weights
007gzs/insightface
python
def init_weights(self, pretrained=None): 'Initialize the weights in backbone.\n\n Args:\n pretrained (str, optional): Path to pre-trained weights.\n Defaults to None.\n ' if isinstance(pretrained, str): logger = get_root_logger() load_checkpoint(self, pretrained, strict=False, logger=logger) elif (pretrained is None): for m in self.modules(): if isinstance(m, nn.Conv2d): kaiming_init(m) elif isinstance(m, (_BatchNorm, nn.GroupNorm)): constant_init(m, 1) if (self.dcn is not None): for m in self.modules(): if isinstance(m, Bottle2neck): for n in m.convs: if hasattr(n, 'conv_offset'): constant_init(n.conv_offset, 0) if self.zero_init_residual: for m in self.modules(): if isinstance(m, Bottle2neck): constant_init(m.norm3, 0) else: raise TypeError('pretrained must be a str or None')
def parse_assets(lines): ' Parse asset list\n\n :param string paste_string: An asset list string\n ' (matches, bad_lines) = regex_match_lines(ASSET_LIST_RE, lines) result = [{'name': name, 'quantity': (f_int(quantity) or 1), 'group': group, 'category': category, 'size': size, 'slot': slot, 'volume': volume, 'meta_level': meta_level, 'tech_level': tech_level} for (name, quantity, _, group, _, category, _, size, _, slot, _, volume, _, meta_level, _, tech_level) in matches] return (result, bad_lines)
6,874,263,680,403,479,000
Parse asset list :param string paste_string: An asset list string
eveparser/parsers/assets.py
parse_assets
Nothing4You/eveparser
python
def parse_assets(lines): ' Parse asset list\n\n :param string paste_string: An asset list string\n ' (matches, bad_lines) = regex_match_lines(ASSET_LIST_RE, lines) result = [{'name': name, 'quantity': (f_int(quantity) or 1), 'group': group, 'category': category, 'size': size, 'slot': slot, 'volume': volume, 'meta_level': meta_level, 'tech_level': tech_level} for (name, quantity, _, group, _, category, _, size, _, slot, _, volume, _, meta_level, _, tech_level) in matches] return (result, bad_lines)
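A usage sketch for parse_assets(). The exact line format accepted by ASSET_LIST_RE is not shown in this record, so the input file is only a placeholder; what is grounded is the call signature (a list of lines in, a (result, bad_lines) tuple out) and the keys of each result dict.

```python
# Hypothetical usage sketch; the asset-list line format is an assumption,
# since ASSET_LIST_RE is not shown above.
from eveparser.parsers.assets import parse_assets

clipboard_text = open('assets.txt').read()       # e.g. text copied from the EVE client
result, bad_lines = parse_assets(clipboard_text.splitlines())

for item in result:
    # Each entry is a dict with these keys (taken from the comprehension above):
    # name, quantity, group, category, size, slot, volume, meta_level, tech_level
    print(item['name'], item['quantity'])

if bad_lines:
    print('Unparsed lines:', bad_lines)
```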
def __init__(self, compute_client): 'Instantiate ZoneResourceFetcher and embed all required data into it.\n\n ZoneResourceFetcher is a class depending on "base_classes"\n class layout (properties side-derived from one of base_class class). This\n function can be used to avoid unfeasible inheritance and use composition\n instead when refactoring away from base_classes into stateless style.\n\n This constructor embeds following properties into ZoneResourceFetcher\n instance:\n - compute\n - messages\n - http\n - batch_url\n\n Example:\n compute_holder = base_classes.ComputeApiHolder(self.ReleaseTrack())\n client = compute_holder.client\n\n zone_resource_fetcher = ZoneResourceFetcher(client)\n or\n zone_resource_fetcher = ZoneResourceFetcher(self.compute_client)\n to use in a class derived from some of base_classes\n\n zone_resource_fetcher.WarnForZonalCreation(...)\n\n Args:\n compute_client: compute_holder.client\n ' self._compute = compute_client.apitools_client self._messages = compute_client.messages self._http = compute_client.apitools_client.http self._batch_url = compute_client.batch_url
4,449,017,669,771,330,600
Instantiate ZoneResourceFetcher and embed all required data into it. ZoneResourceFetcher is a class depending on "base_classes" class layout (properties side-derived from one of base_class class). This function can be used to avoid unfeasible inheritance and use composition instead when refactoring away from base_classes into stateless style. This constructor embeds following properties into ZoneResourceFetcher instance: - compute - messages - http - batch_url Example: compute_holder = base_classes.ComputeApiHolder(self.ReleaseTrack()) client = compute_holder.client zone_resource_fetcher = ZoneResourceFetcher(client) or zone_resource_fetcher = ZoneResourceFetcher(self.compute_client) to use in a class derived from some of base_classes zone_resource_fetcher.WarnForZonalCreation(...) Args: compute_client: compute_holder.client
gcloud/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/compute/zone_utils.py
__init__
bopopescu/JobSniperRails
python
def __init__(self, compute_client): 'Instantiate ZoneResourceFetcher and embed all required data into it.\n\n ZoneResourceFetcher is a class depending on "base_classes"\n class layout (properties side-derived from one of base_class class). This\n function can be used to avoid unfeasible inheritance and use composition\n instead when refactoring away from base_classes into stateless style.\n\n This constructor embeds following properties into ZoneResourceFetcher\n instance:\n - compute\n - messages\n - http\n - batch_url\n\n Example:\n compute_holder = base_classes.ComputeApiHolder(self.ReleaseTrack())\n client = compute_holder.client\n\n zone_resource_fetcher = ZoneResourceFetcher(client)\n or\n zone_resource_fetcher = ZoneResourceFetcher(self.compute_client)\n to use in a class derived from some of base_classes\n\n zone_resource_fetcher.WarnForZonalCreation(...)\n\n Args:\n compute_client: compute_holder.client\n ' self._compute = compute_client.apitools_client self._messages = compute_client.messages self._http = compute_client.apitools_client.http self._batch_url = compute_client.batch_url
def GetZones(self, resource_refs): 'Fetches zone resources.' errors = [] requests = [] zone_names = set() for resource_ref in resource_refs: if (resource_ref.zone not in zone_names): zone_names.add(resource_ref.zone) requests.append((self._compute.zones, 'Get', self._messages.ComputeZonesGetRequest(project=resource_ref.project, zone=resource_ref.zone))) res = list(request_helper.MakeRequests(requests=requests, http=self._http, batch_url=self._batch_url, errors=errors)) if errors: return None else: return res
-4,250,735,275,922,151,000
Fetches zone resources.
gcloud/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/compute/zone_utils.py
GetZones
bopopescu/JobSniperRails
python
def GetZones(self, resource_refs): errors = [] requests = [] zone_names = set() for resource_ref in resource_refs: if (resource_ref.zone not in zone_names): zone_names.add(resource_ref.zone) requests.append((self._compute.zones, 'Get', self._messages.ComputeZonesGetRequest(project=resource_ref.project, zone=resource_ref.zone))) res = list(request_helper.MakeRequests(requests=requests, http=self._http, batch_url=self._batch_url, errors=errors)) if errors: return None else: return res
def WarnForZonalCreation(self, resource_refs): 'Warns the user if a zone has upcoming deprecation.' zones = self.GetZones(resource_refs) if (not zones): return prompts = [] zones_with_deprecated = [] for zone in zones: if zone.deprecated: zones_with_deprecated.append(zone) if (not zones_with_deprecated): return if zones_with_deprecated: phrases = [] if (len(zones_with_deprecated) == 1): phrases = ('zone is', 'this zone', 'the') else: phrases = ('zones are', 'these zones', 'their') title = '\nWARNING: The following selected {0} deprecated. All resources in {1} will be deleted after {2} turndown date.'.format(phrases[0], phrases[1], phrases[2]) printable_deprecated_zones = [] for zone in zones_with_deprecated: if zone.deprecated.deleted: printable_deprecated_zones.append('[{0}] {1}'.format(zone.name, zone.deprecated.deleted)) else: printable_deprecated_zones.append('[{0}]'.format(zone.name)) prompts.append(utils.ConstructList(title, printable_deprecated_zones)) final_message = ' '.join(prompts) if (not console_io.PromptContinue(message=final_message)): raise calliope_exceptions.ToolException('Creation aborted by user.')
-5,013,486,056,412,300,000
Warns the user if a zone has upcoming deprecation.
gcloud/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/compute/zone_utils.py
WarnForZonalCreation
bopopescu/JobSniperRails
python
def WarnForZonalCreation(self, resource_refs): zones = self.GetZones(resource_refs) if (not zones): return prompts = [] zones_with_deprecated = [] for zone in zones: if zone.deprecated: zones_with_deprecated.append(zone) if (not zones_with_deprecated): return if zones_with_deprecated: phrases = [] if (len(zones_with_deprecated) == 1): phrases = ('zone is', 'this zone', 'the') else: phrases = ('zones are', 'these zones', 'their') title = '\nWARNING: The following selected {0} deprecated. All resources in {1} will be deleted after {2} turndown date.'.format(phrases[0], phrases[1], phrases[2]) printable_deprecated_zones = [] for zone in zones_with_deprecated: if zone.deprecated.deleted: printable_deprecated_zones.append('[{0}] {1}'.format(zone.name, zone.deprecated.deleted)) else: printable_deprecated_zones.append('[{0}]'.format(zone.name)) prompts.append(utils.ConstructList(title, printable_deprecated_zones)) final_message = ' '.join(prompts) if (not console_io.PromptContinue(message=final_message)): raise calliope_exceptions.ToolException('Creation aborted by user.')
def run_forever(): 'Runs the asyncio event loop with and\n ensures state machines are exited upon a KeyboardInterrupt.\n ' loop = asyncio.get_event_loop() try: loop.run_forever() except KeyboardInterrupt: Framework.stop() loop.close()
-6,814,674,388,628,781,000
Runs the asyncio event loop and ensures state machines are exited upon a KeyboardInterrupt.
farc/__init__.py
run_forever
SzeMengTan/farc
python
def run_forever(): 'Runs the asyncio event loop and\n ensures state machines are exited upon a KeyboardInterrupt.\n ' loop = asyncio.get_event_loop() try: loop.run_forever() except KeyboardInterrupt: Framework.stop() loop.close()
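A minimal sketch of how run_forever() is typically called at the end of an application's main block; the assumption that the application's state machines were created and started before this point is mine, not stated in the record.

```python
# Minimal sketch: assumes the application's Ahsm instances were already
# created and started elsewhere.
import farc

if __name__ == '__main__':
    # Blocks on the asyncio event loop; a KeyboardInterrupt (Ctrl-C) triggers
    # Framework.stop() so the state machines can run their exit paths.
    farc.run_forever()
```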
@staticmethod def enable_spy(spy_cls): 'Sets the Spy to use the given class\n and calls its initializer.\n ' Spy._actv_cls = spy_cls spy_cls.init()
2,137,537,250,594,512,100
Sets the Spy to use the given class and calls its initializer.
farc/__init__.py
enable_spy
SzeMengTan/farc
python
@staticmethod def enable_spy(spy_cls): 'Sets the Spy to use the given class\n and calls its initializer.\n ' Spy._actv_cls = spy_cls spy_cls.init()
def __getattr__(*args): 'Returns\n 1) the enable_spy static method if requested by name, or\n 2) the attribute from the active class (if active class was set), or\n 3) a function that swallows any arguments and does nothing.\n ' if (args[1] == 'enable_spy'): return Spy.enable_spy if Spy._actv_cls: return getattr(Spy._actv_cls, args[1]) return (lambda *x: None)
6,580,064,896,499,551,000
Returns 1) the enable_spy static method if requested by name, or 2) the attribute from the active class (if active class was set), or 3) a function that swallows any arguments and does nothing.
farc/__init__.py
__getattr__
SzeMengTan/farc
python
def __getattr__(*args): 'Returns\n 1) the enable_spy static method if requested by name, or\n 2) the attribute from the active class (if active class was set), or\n 3) a function that swallows any arguments and does nothing.\n ' if (args[1] == 'enable_spy'): return Spy.enable_spy if Spy._actv_cls: return getattr(Spy._actv_cls, args[1]) return (lambda *x: None)
@staticmethod def exists(signame): 'Returns True if signame is in the Signal registry.\n ' return (signame in Signal._registry)
-1,967,252,211,100,177,000
Returns True if signame is in the Signal registry.
farc/__init__.py
exists
SzeMengTan/farc
python
@staticmethod def exists(signame): '\n ' return (signame in Signal._registry)
@staticmethod def register(signame): 'Registers the signame if it is not already registered.\n Returns the signal number for the signame.\n ' assert (type(signame) is str) if (signame in Signal._registry): return Signal._registry[signame] else: sigid = len(Signal._lookup) Signal._registry[signame] = sigid Signal._lookup.append(signame) Spy.on_signal_register(signame, sigid) return sigid
-5,932,514,422,443,632,000
Registers the signame if it is not already registered. Returns the signal number for the signame.
farc/__init__.py
register
SzeMengTan/farc
python
@staticmethod def register(signame): 'Registers the signame if it is not already registered.\n Returns the signal number for the signame.\n ' assert (type(signame) is str) if (signame in Signal._registry): return Signal._registry[signame] else: sigid = len(Signal._lookup) Signal._registry[signame] = sigid Signal._lookup.append(signame) Spy.on_signal_register(signame, sigid) return sigid
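A small sketch of the Signal registry API from the two records above; it assumes Signal is importable from the farc package, which is where these methods are defined (farc/__init__.py).

```python
# Sketch of Signal.register()/Signal.exists(); the signal name is a placeholder.
from farc import Signal

sigid = Signal.register('BUTTON_PRESSED')   # returns a new signal number
same = Signal.register('BUTTON_PRESSED')    # re-registering returns the same id
assert sigid == same
assert Signal.exists('BUTTON_PRESSED')
assert not Signal.exists('NEVER_REGISTERED')
```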
def __init__(self): "Sets this Hsm's current state to Hsm.top(), the default state\n and stores the given initial state.\n " self.state = self.top self.initial_state = self._initial
-1,334,007,641,773,020,400
Sets this Hsm's current state to Hsm.top(), the default state and stores the given initial state.
farc/__init__.py
__init__
SzeMengTan/farc
python
def __init__(self): "Sets this Hsm's current state to Hsm.top(), the default state\n and stores the given initial state.\n " self.state = self.top self.initial_state = self._initial
def _initial(self, event): 'Raises a NotImplementedError to force the derived class\n to implement its own initial state.\n ' raise NotImplementedError
6,814,616,314,063,496,000
Raises a NotImplementedError to force the derived class to implement its own initial state.
farc/__init__.py
_initial
SzeMengTan/farc
python
def _initial(self, event): 'Raises a NotImplementedError to force the derived class\n to implement its own initial state.\n ' raise NotImplementedError
def state(func): 'A decorator that identifies which methods are states.\n The presence of the farc_state attr, not the value of the attr,\n determines statehood.\n The Spy debugging system uses the farc_state attribute\n to determine which methods inside a class are actually states.\n Other uses of the attribute may come in the future.\n ' @wraps(func) def func_wrap(self, evt): result = func(self, evt) Spy.on_state_handler_called(func_wrap, evt, result) return result setattr(func_wrap, 'farc_state', True) return staticmethod(func_wrap)
4,006,958,211,435,342,000
A decorator that identifies which methods are states. The presence of the farc_state attr, not the value of the attr, determines statehood. The Spy debugging system uses the farc_state attribute to determine which methods inside a class are actually states. Other uses of the attribute may come in the future.
farc/__init__.py
state
SzeMengTan/farc
python
def state(func): 'A decorator that identifies which methods are states.\n The presence of the farc_state attr, not the value of the attr,\n determines statehood.\n The Spy debugging system uses the farc_state attribute\n to determine which methods inside a class are actually states.\n Other uses of the attribute may come in the future.\n ' @wraps(func) def func_wrap(self, evt): result = func(self, evt) Spy.on_state_handler_called(func_wrap, evt, result) return result setattr(func_wrap, 'farc_state', True) return staticmethod(func_wrap)
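The effect of the state decorator above can be checked directly: it wraps the handler, tags the wrapper with a farc_state attribute, and returns a staticmethod. The sketch assumes the decorator is reachable as Hsm.state (i.e. it lives in the Hsm class body, as the decorated top() handler below suggests); that reachability is an assumption, not stated in the record.

```python
# Sketch of what the `state` decorator does to a handler; Hsm.state reachability
# is an assumption. A real initial handler would transition to the machine's
# first state rather than just return RET_HANDLED.
from farc import Hsm

class Blinky(Hsm):
    @Hsm.state
    def _initial(me, event):
        return Hsm.RET_HANDLED

# The wrapper is tagged so the Spy debugging system can recognize states.
assert getattr(Blinky._initial, 'farc_state', False)
```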
@state def top(me, event): "This is the default state handler.\n This handler ignores all signals except\n the POSIX-like events, SIGINT/SIGTERM.\n Handling SIGINT/SIGTERM here causes the Exit path\n to be executed from the application's active state\n to top/here.\n The application may put something useful\n or nothing at all in the Exit path.\n " if (Event.SIGINT == event): return Hsm.RET_HANDLED if (Event.SIGTERM == event): return Hsm.RET_HANDLED return Hsm.RET_IGNORED
4,357,347,042,863,078,000
This is the default state handler. This handler ignores all signals except the POSIX-like events, SIGINT/SIGTERM. Handling SIGINT/SIGTERM here causes the Exit path to be executed from the application's active state to top/here. The application may put something useful or nothing at all in the Exit path.
farc/__init__.py
top
SzeMengTan/farc
python
@state def top(me, event): "This is the default state handler.\n This handler ignores all signals except\n the POSIX-like events, SIGINT/SIGTERM.\n Handling SIGINT/SIGTERM here causes the Exit path\n to be executed from the application's active state\n to top/here.\n The application may put something useful\n or nothing at all in the Exit path.\n " if (Event.SIGINT == event): return Hsm.RET_HANDLED if (Event.SIGTERM == event): return Hsm.RET_HANDLED return Hsm.RET_IGNORED
@staticmethod def _perform_init_chain(me, current): 'Act on the chain of initializations required starting from current.\n ' t = current while (Hsm.trig(me, (t if (t != Hsm.top) else me.initial_state), Signal.INIT) == Hsm.RET_TRAN): path = [] while (me.state != t): path.append(me.state) Hsm.trig(me, me.state, Signal.EMPTY) me.state = path[0] assert (len(path) < 32) path.reverse() for s in path: Hsm.enter(me, s) t = path[(- 1)] return t
-3,732,168,619,316,651,000
Act on the chain of initializations required starting from current.
farc/__init__.py
_perform_init_chain
SzeMengTan/farc
python
@staticmethod def _perform_init_chain(me, current): '\n ' t = current while (Hsm.trig(me, (t if (t != Hsm.top) else me.initial_state), Signal.INIT) == Hsm.RET_TRAN): path = [] while (me.state != t): path.append(me.state) Hsm.trig(me, me.state, Signal.EMPTY) me.state = path[0] assert (len(path) < 32) path.reverse() for s in path: Hsm.enter(me, s) t = path[(- 1)] return t
@staticmethod def init(me, event=None): 'Transitions to the initial state. Follows any INIT transitions\n from the inital state and performs ENTRY actions as it proceeds.\n Use this to pass any parameters to initialize the state machine.\n p. 172\n ' me.state = Hsm._perform_init_chain(me, Hsm.top)
-8,697,070,345,986,381,000
Transitions to the initial state. Follows any INIT transitions from the initial state and performs ENTRY actions as it proceeds. Use this to pass any parameters to initialize the state machine. p. 172
farc/__init__.py
init
SzeMengTan/farc
python
@staticmethod def init(me, event=None): 'Transitions to the initial state. Follows any INIT transitions\n from the initial state and performs ENTRY actions as it proceeds.\n Use this to pass any parameters to initialize the state machine.\n p. 172\n ' me.state = Hsm._perform_init_chain(me, Hsm.top)
@staticmethod def dispatch(me, event): "Dispatches the given event to this Hsm.\n Follows the application's state transitions\n until the event is handled or top() is reached\n p. 174\n " Spy.on_hsm_dispatch_event(event) t = me.state exit_path = [] r = Hsm.RET_SUPER while (r == Hsm.RET_SUPER): s = me.state exit_path.append(s) Spy.on_hsm_dispatch_pre(s) r = s(me, event) Spy.on_hsm_dispatch_post(exit_path) if (r == Hsm.RET_TRAN): t = me.state for st in exit_path[:(- 1)]: r = Hsm.exit(me, st) assert ((r == Hsm.RET_SUPER) or (r == Hsm.RET_HANDLED)) s = exit_path[(- 1)] Hsm._perform_transition(me, s, t) t = Hsm._perform_init_chain(me, t) me.state = t
-8,821,831,792,432,782,000
Dispatches the given event to this Hsm. Follows the application's state transitions until the event is handled or top() is reached p. 174
farc/__init__.py
dispatch
SzeMengTan/farc
python
@staticmethod def dispatch(me, event): "Dispatches the given event to this Hsm.\n Follows the application's state transitions\n until the event is handled or top() is reached\n p. 174\n " Spy.on_hsm_dispatch_event(event) t = me.state exit_path = [] r = Hsm.RET_SUPER while (r == Hsm.RET_SUPER): s = me.state exit_path.append(s) Spy.on_hsm_dispatch_pre(s) r = s(me, event) Spy.on_hsm_dispatch_post(exit_path) if (r == Hsm.RET_TRAN): t = me.state for st in exit_path[:(- 1)]: r = Hsm.exit(me, st) assert ((r == Hsm.RET_SUPER) or (r == Hsm.RET_HANDLED)) s = exit_path[(- 1)] Hsm._perform_transition(me, s, t) t = Hsm._perform_init_chain(me, t) me.state = t
@staticmethod def post(event, act): "Posts the event to the given Ahsm's event queue.\n The argument, act, is an Ahsm instance.\n " assert isinstance(act, Ahsm) act.postFIFO(event)
1,225,549,252,378,367,700
Posts the event to the given Ahsm's event queue. The argument, act, is an Ahsm instance.
farc/__init__.py
post
SzeMengTan/farc
python
@staticmethod def post(event, act): "Posts the event to the given Ahsm's event queue.\n The argument, act, is an Ahsm instance.\n " assert isinstance(act, Ahsm) act.postFIFO(event)
@staticmethod def post_by_name(event, act_name): "Posts the event to the given Ahsm's event queue.\n The argument, act, is a string of the name of the class\n to which the event is sent. The event will post to all actors\n having the given classname.\n " assert (type(act_name) is str) for act in Framework._ahsm_registry: if (act.__class__.__name__ == act_name): act.postFIFO(event)
-6,567,935,478,147,241,000
Posts the event to the given Ahsm's event queue. The argument, act, is a string of the name of the class to which the event is sent. The event will post to all actors having the given classname.
farc/__init__.py
post_by_name
SzeMengTan/farc
python
@staticmethod def post_by_name(event, act_name): "Posts the event to the given Ahsm's event queue.\n The argument, act, is a string of the name of the class\n to which the event is sent. The event will post to all actors\n having the given classname.\n " assert (type(act_name) is str) for act in Framework._ahsm_registry: if (act.__class__.__name__ == act_name): act.postFIFO(event)
@staticmethod def publish(event): "Posts the event to the message queue of every Ahsm\n that is subscribed to the event's signal.\n " if (event.signal in Framework._subscriber_table): for act in Framework._subscriber_table[event.signal]: act.postFIFO(event) Framework._event_loop.call_soon_threadsafe(Framework.run)
-3,316,009,976,515,112,000
Posts the event to the message queue of every Ahsm that is subscribed to the event's signal.
farc/__init__.py
publish
SzeMengTan/farc
python
@staticmethod def publish(event): "Posts the event to the message queue of every Ahsm\n that is subscribed to the event's signal.\n " if (event.signal in Framework._subscriber_table): for act in Framework._subscriber_table[event.signal]: act.postFIFO(event) Framework._event_loop.call_soon_threadsafe(Framework.run)
@staticmethod def subscribe(signame, act): 'Adds the given Ahsm to the subscriber table list\n for the given signal. The argument, signame, is a string of the name\n of the Signal to which the Ahsm is subscribing. Using a string allows\n the Signal to be created in the registry if it is not already.\n ' sigid = Signal.register(signame) if (sigid not in Framework._subscriber_table): Framework._subscriber_table[sigid] = [] Framework._subscriber_table[sigid].append(act)
7,131,938,376,864,232,000
Adds the given Ahsm to the subscriber table list for the given signal. The argument, signame, is a string of the name of the Signal to which the Ahsm is subscribing. Using a string allows the Signal to be created in the registry if it is not already.
farc/__init__.py
subscribe
SzeMengTan/farc
python
@staticmethod def subscribe(signame, act): 'Adds the given Ahsm to the subscriber table list\n for the given signal. The argument, signame, is a string of the name\n of the Signal to which the Ahsm is subscribing. Using a string allows\n the Signal to be created in the registry if it is not already.\n ' sigid = Signal.register(signame) if (sigid not in Framework._subscriber_table): Framework._subscriber_table[sigid] = [] Framework._subscriber_table[sigid].append(act)
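A stand-alone sketch of the subscribe/publish bookkeeping used by the two records above, with actors modelled as plain event queues (lists). The `Event` namedtuple is an assumption about the framework's (unshown) event type, which `publish()` only requires to carry a `signal` field.

```python
import collections

Event = collections.namedtuple('Event', ['signal', 'value'])  # assumed event shape

_signal_registry = {}    # signame -> integer signal id
_subscriber_table = {}   # signal id -> list of subscribed actors

def register(signame):
    return _signal_registry.setdefault(signame, len(_signal_registry))

def subscribe(signame, actor):
    _subscriber_table.setdefault(register(signame), []).append(actor)

def publish(event):
    # Deliver the event to every actor subscribed to its signal, FIFO per actor.
    for actor in _subscriber_table.get(event.signal, []):
        actor.append(event)

alice, bob = [], []          # actors reduced to their event queues
subscribe('TICK', alice)
subscribe('TICK', bob)
publish(Event(register('TICK'), None))
print(len(alice), len(bob))  # 1 1
```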
@staticmethod def addTimeEvent(tm_event, delta): "Adds the TimeEvent to the list of time events in the Framework.\n The event will fire its signal (to the TimeEvent's target Ahsm)\n after the delay, delta.\n " expiration = (Framework._event_loop.time() + delta) Framework.addTimeEventAt(tm_event, expiration)
-1,549,828,390,167,262,000
Adds the TimeEvent to the list of time events in the Framework. The event will fire its signal (to the TimeEvent's target Ahsm) after the delay, delta.
farc/__init__.py
addTimeEvent
SzeMengTan/farc
python
@staticmethod def addTimeEvent(tm_event, delta): "Adds the TimeEvent to the list of time events in the Framework.\n The event will fire its signal (to the TimeEvent's target Ahsm)\n after the delay, delta.\n " expiration = (Framework._event_loop.time() + delta) Framework.addTimeEventAt(tm_event, expiration)
@staticmethod def addTimeEventAt(tm_event, abs_time): "Adds the TimeEvent to the list of time events in the Framework.\n The event will fire its signal (to the TimeEvent's target Ahsm)\n at the given absolute time (_event_loop.time()).\n " assert (tm_event not in Framework._time_events.values()) Framework._insortTimeEvent(tm_event, abs_time)
5,152,183,799,103,508,000
Adds the TimeEvent to the list of time events in the Framework. The event will fire its signal (to the TimeEvent's target Ahsm) at the given absolute time (_event_loop.time()).
farc/__init__.py
addTimeEventAt
SzeMengTan/farc
python
@staticmethod def addTimeEventAt(tm_event, abs_time): "Adds the TimeEvent to the list of time events in the Framework.\n The event will fire its signal (to the TimeEvent's target Ahsm)\n at the given absolute time (_event_loop.time()).\n " assert (tm_event not in Framework._time_events.values()) Framework._insortTimeEvent(tm_event, abs_time)
@staticmethod def _insortTimeEvent(tm_event, expiration): 'Inserts a TimeEvent into the list of time events,\n sorted by the next expiration of the timer.\n If the expiration time matches an existing expiration,\n we add the smallest amount of time to the given expiration\n to avoid a key collision in the Dict\n and make the identically-timed events fire in a FIFO fashion.\n ' now = Framework._event_loop.time() if (expiration < now): tm_event.act.postFIFO(tm_event) while (expiration in Framework._time_events.keys()): (m, e) = math.frexp(expiration) expiration = ((m + sys.float_info.epsilon) * (2 ** e)) Framework._time_events[expiration] = tm_event if (len(Framework._time_events) == 1): Framework._tm_event_handle = Framework._event_loop.call_at(expiration, Framework.timeEventCallback, tm_event, expiration) elif (expiration < min(Framework._time_events.keys())): Framework._tm_event_handle.cancel() Framework._tm_event_handle = Framework._event_loop.call_at(expiration, Framework.timeEventCallback, tm_event, expiration)
9,202,879,888,713,282,000
Inserts a TimeEvent into the list of time events, sorted by the next expiration of the timer. If the expiration time matches an existing expiration, we add the smallest amount of time to the given expiration to avoid a key collision in the Dict and make the identically-timed events fire in a FIFO fashion.
farc/__init__.py
_insortTimeEvent
SzeMengTan/farc
python
@staticmethod def _insortTimeEvent(tm_event, expiration): 'Inserts a TimeEvent into the list of time events,\n sorted by the next expiration of the timer.\n If the expiration time matches an existing expiration,\n we add the smallest amount of time to the given expiration\n to avoid a key collision in the Dict\n and make the identically-timed events fire in a FIFO fashion.\n ' now = Framework._event_loop.time() if (expiration < now): tm_event.act.postFIFO(tm_event) while (expiration in Framework._time_events.keys()): (m, e) = math.frexp(expiration) expiration = ((m + sys.float_info.epsilon) * (2 ** e)) Framework._time_events[expiration] = tm_event if (len(Framework._time_events) == 1): Framework._tm_event_handle = Framework._event_loop.call_at(expiration, Framework.timeEventCallback, tm_event, expiration) elif (expiration < min(Framework._time_events.keys())): Framework._tm_event_handle.cancel() Framework._tm_event_handle = Framework._event_loop.call_at(expiration, Framework.timeEventCallback, tm_event, expiration)
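The collision-avoidance trick in the record above, isolated as a numeric sketch: when two time events would expire at exactly the same float key, the expiration is nudged up by the smallest representable step so identically timed events keep distinct dictionary keys and fire in FIFO order.

```python
import math
import sys

def nudge_past_collisions(expiration, existing_keys):
    # Mirror of the while-loop in _insortTimeEvent above.
    while expiration in existing_keys:
        m, e = math.frexp(expiration)
        expiration = (m + sys.float_info.epsilon) * (2 ** e)
    return expiration

keys = {100.0: 'first event'}
second = nudge_past_collisions(100.0, keys)
print(second > 100.0)     # True
print(second - 100.0)     # a few ULPs, e.g. ~2.8e-14
```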
@staticmethod def removeTimeEvent(tm_event): "Removes the TimeEvent from the list of active time events.\n Cancels the TimeEvent's callback if there is one.\n Schedules the next event's callback if there is one.\n " for (k, v) in Framework._time_events.items(): if (v is tm_event): if (k == min(Framework._time_events.keys())): del Framework._time_events[k] if Framework._tm_event_handle: Framework._tm_event_handle.cancel() if (len(Framework._time_events) > 0): next_expiration = min(Framework._time_events.keys()) next_event = Framework._time_events[next_expiration] Framework._tm_event_handle = Framework._event_loop.call_at(next_expiration, Framework.timeEventCallback, next_event, next_expiration) else: Framework._tm_event_handle = None else: del Framework._time_events[k] break
9,140,786,555,416,009,000
Removes the TimeEvent from the list of active time events. Cancels the TimeEvent's callback if there is one. Schedules the next event's callback if there is one.
farc/__init__.py
removeTimeEvent
SzeMengTan/farc
python
@staticmethod def removeTimeEvent(tm_event): "Removes the TimeEvent from the list of active time events.\n Cancels the TimeEvent's callback if there is one.\n Schedules the next event's callback if there is one.\n " for (k, v) in Framework._time_events.items(): if (v is tm_event): if (k == min(Framework._time_events.keys())): del Framework._time_events[k] if Framework._tm_event_handle: Framework._tm_event_handle.cancel() if (len(Framework._time_events) > 0): next_expiration = min(Framework._time_events.keys()) next_event = Framework._time_events[next_expiration] Framework._tm_event_handle = Framework._event_loop.call_at(next_expiration, Framework.timeEventCallback, next_event, next_expiration) else: Framework._tm_event_handle = None else: del Framework._time_events[k] break
@staticmethod def timeEventCallback(tm_event, expiration): "The callback function for all TimeEvents.\n Posts the event to the event's target Ahsm.\n If the TimeEvent is periodic, re-insort the event\n in the list of active time events.\n " assert (expiration in Framework._time_events.keys()), ('Exp:%d _time_events.keys():%s' % (expiration, Framework._time_events.keys())) del Framework._time_events[expiration] Framework._tm_event_handle = None tm_event.act.postFIFO(tm_event) if (tm_event.interval > 0): Framework._insortTimeEvent(tm_event, (expiration + tm_event.interval)) if ((Framework._tm_event_handle == None) and (len(Framework._time_events) > 0)): next_expiration = min(Framework._time_events.keys()) next_event = Framework._time_events[next_expiration] Framework._tm_event_handle = Framework._event_loop.call_at(next_expiration, Framework.timeEventCallback, next_event, next_expiration) Framework._event_loop.call_soon_threadsafe(Framework.run)
2,536,676,419,023,421,000
The callback function for all TimeEvents. Posts the event to the event's target Ahsm. If the TimeEvent is periodic, re-insort the event in the list of active time events.
farc/__init__.py
timeEventCallback
SzeMengTan/farc
python
@staticmethod def timeEventCallback(tm_event, expiration): "The callback function for all TimeEvents.\n Posts the event to the event's target Ahsm.\n If the TimeEvent is periodic, re-insort the event\n in the list of active time events.\n " assert (expiration in Framework._time_events.keys()), ('Exp:%d _time_events.keys():%s' % (expiration, Framework._time_events.keys())) del Framework._time_events[expiration] Framework._tm_event_handle = None tm_event.act.postFIFO(tm_event) if (tm_event.interval > 0): Framework._insortTimeEvent(tm_event, (expiration + tm_event.interval)) if ((Framework._tm_event_handle == None) and (len(Framework._time_events) > 0)): next_expiration = min(Framework._time_events.keys()) next_event = Framework._time_events[next_expiration] Framework._tm_event_handle = Framework._event_loop.call_at(next_expiration, Framework.timeEventCallback, next_event, next_expiration) Framework._event_loop.call_soon_threadsafe(Framework.run)
@staticmethod def add(act): 'Makes the framework aware of the given Ahsm.\n ' Framework._ahsm_registry.append(act) assert (act.priority not in Framework._priority_dict), 'Priority MUST be unique' Framework._priority_dict[act.priority] = act Spy.on_framework_add(act)
4,218,832,176,318,824,000
Makes the framework aware of the given Ahsm.
farc/__init__.py
add
SzeMengTan/farc
python
@staticmethod def add(act): '\n ' Framework._ahsm_registry.append(act) assert (act.priority not in Framework._priority_dict), 'Priority MUST be unique' Framework._priority_dict[act.priority] = act Spy.on_framework_add(act)
@staticmethod def run(): 'Dispatches an event to the highest priority Ahsm\n until all event queues are empty (i.e. Run To Completion).\n ' getPriority = (lambda x: x.priority) while True: allQueuesEmpty = True sorted_acts = sorted(Framework._ahsm_registry, key=getPriority) for act in sorted_acts: if act.has_msgs(): event_next = act.pop_msg() act.dispatch(act, event_next) allQueuesEmpty = False break if allQueuesEmpty: return
7,207,906,900,246,715,000
Dispatches an event to the highest priority Ahsm until all event queues are empty (i.e. Run To Completion).
farc/__init__.py
run
SzeMengTan/farc
python
@staticmethod def run(): 'Dispatches an event to the highest priority Ahsm\n until all event queues are empty (i.e. Run To Completion).\n ' getPriority = (lambda x: x.priority) while True: allQueuesEmpty = True sorted_acts = sorted(Framework._ahsm_registry, key=getPriority) for act in sorted_acts: if act.has_msgs(): event_next = act.pop_msg() act.dispatch(act, event_next) allQueuesEmpty = False break if allQueuesEmpty: return
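A stand-alone sketch of the run-to-completion loop in the record above: on every pass the actors are sorted by priority and the first one holding a message gets exactly one event dispatched, then the scan restarts; the loop exits only when every queue is empty. `Actor` is a hypothetical stand-in for `Ahsm`, and as in the original `sorted()` call the numerically smallest priority is served first.

```python
class Actor:
    def __init__(self, name, priority):
        self.name, self.priority, self.queue = name, priority, []
    def has_msgs(self):
        return bool(self.queue)
    def pop_msg(self):
        return self.queue.pop(0)
    def dispatch(self, evt):
        print(f'{self.name} handles {evt}')

def run(actors):
    while True:
        for act in sorted(actors, key=lambda a: a.priority):
            if act.has_msgs():
                act.dispatch(act.pop_msg())
                break           # one event dispatched, then rescan from the top
        else:
            return              # all queues empty: run to completion is done

first = Actor('first', 1)
second = Actor('second', 10)
second.queue.append('slow-evt')
first.queue.extend(['evt-1', 'evt-2'])
run([first, second])            # first's two events drain before second's
```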
@staticmethod def stop(): 'EXITs all Ahsms and stops the event loop.\n ' if Framework._tm_event_handle: Framework._tm_event_handle.cancel() Framework._tm_event_handle = None for act in Framework._ahsm_registry: Framework.post(Event.EXIT, act) Framework.run() Framework._event_loop.stop() Spy.on_framework_stop()
-4,242,969,735,239,040,500
EXITs all Ahsms and stops the event loop.
farc/__init__.py
stop
SzeMengTan/farc
python
@staticmethod def stop(): '\n ' if Framework._tm_event_handle: Framework._tm_event_handle.cancel() Framework._tm_event_handle = None for act in Framework._ahsm_registry: Framework.post(Event.EXIT, act) Framework.run() Framework._event_loop.stop() Spy.on_framework_stop()
@staticmethod def print_info(): 'Prints the name and current state\n of each actor in the framework.\n Meant to be called when ctrl+T (SIGINFO/29) is issued.\n ' for act in Framework._ahsm_registry: print(act.__class__.__name__, act.state.__name__)
-2,474,237,011,219,255,300
Prints the name and current state of each actor in the framework. Meant to be called when ctrl+T (SIGINFO/29) is issued.
farc/__init__.py
print_info
SzeMengTan/farc
python
@staticmethod def print_info(): 'Prints the name and current state\n of each actor in the framework.\n Meant to be called when ctrl+T (SIGINFO/29) is issued.\n ' for act in Framework._ahsm_registry: print(act.__class__.__name__, act.state.__name__)
def postAt(self, act, abs_time): 'Posts this TimeEvent to the given Ahsm at a specified time.\n ' assert issubclass(type(act), Ahsm) self.act = act self.interval = 0 Framework.addTimeEventAt(self, abs_time)
-3,514,636,557,849,529,300
Posts this TimeEvent to the given Ahsm at a specified time.
farc/__init__.py
postAt
SzeMengTan/farc
python
def postAt(self, act, abs_time): '\n ' assert issubclass(type(act), Ahsm) self.act = act self.interval = 0 Framework.addTimeEventAt(self, abs_time)
def postIn(self, act, delta): 'Posts this TimeEvent to the given Ahsm after the time delta.\n ' assert issubclass(type(act), Ahsm) self.act = act self.interval = 0 Framework.addTimeEvent(self, delta)
7,578,574,746,659,476,000
Posts this TimeEvent to the given Ahsm after the time delta.
farc/__init__.py
postIn
SzeMengTan/farc
python
def postIn(self, act, delta): '\n ' assert issubclass(type(act), Ahsm) self.act = act self.interval = 0 Framework.addTimeEvent(self, delta)
def postEvery(self, act, delta): 'Posts this TimeEvent to the given Ahsm after the time delta\n and every time delta thereafter until disarmed.\n ' assert issubclass(type(act), Ahsm) self.act = act self.interval = delta Framework.addTimeEvent(self, delta)
8,641,827,318,052,131,000
Posts this TimeEvent to the given Ahsm after the time delta and every time delta thereafter until disarmed.
farc/__init__.py
postEvery
SzeMengTan/farc
python
def postEvery(self, act, delta): 'Posts this TimeEvent to the given Ahsm after the time delta\n and every time delta thereafter until disarmed.\n ' assert issubclass(type(act), Ahsm) self.act = act self.interval = delta Framework.addTimeEvent(self, delta)
def disarm(self): "Removes this TimeEvent from the Framework's active time events.\n " self.act = None Framework.removeTimeEvent(self)
7,122,821,650,057,362,000
Removes this TimeEvent from the Framework's active time events.
farc/__init__.py
disarm
SzeMengTan/farc
python
def disarm(self): "\n " self.act = None Framework.removeTimeEvent(self)
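A small sketch of the distinction the three post*() methods above encode: `postAt()`/`postIn()` set `interval = 0`, so the event fires once, while `postEvery()` sets `interval = delta`, which `timeEventCallback()` uses to re-insert the event after each firing. Plain floats stand in for event-loop times here.

```python
def next_expiration(expiration, interval):
    """Return the follow-up expiration, or None for a one-shot event."""
    return expiration + interval if interval > 0 else None

print(next_expiration(10.0, 0))    # None -> postAt/postIn: fires once
print(next_expiration(10.0, 0.5))  # 10.5 -> postEvery: re-armed every 0.5 s
```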
@staticmethod def calcVariation(onetick, oneposition): '\n (tick: OneTick, position: OnePosition) -> float\n ' created = oneposition.priceMean() if (oneposition.side == OnePosition.SideLong): current = onetick.bid else: current = onetick.ask return (current / created)
2,954,959,345,308,836,000
(tick: OneTick, position: OnePosition) -> float
apps/trade/src/PositionsManager.py
calcVariation
kikei/btc-bot-ai
python
@staticmethod def calcVariation(onetick, oneposition): '\n \n ' created = oneposition.priceMean() if (oneposition.side == OnePosition.SideLong): current = onetick.bid else: current = onetick.ask return (current / created)
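A stand-alone sketch of the variation calculation above, with `SimpleNamespace` stand-ins for the (unshown) `OneTick` and `OnePosition` types; only the fields the method reads (`bid`/`ask`, `side`, `priceMean()`) are modelled, and the side constants are hypothetical placeholders for `OnePosition.SideLong`/`SideShort`.

```python
from types import SimpleNamespace

SIDE_LONG, SIDE_SHORT = 'long', 'short'   # placeholders for OnePosition side constants

def calc_variation(tick, position):
    created = position.priceMean()
    current = tick.bid if position.side == SIDE_LONG else tick.ask
    return current / created              # current price relative to mean entry price

tick = SimpleNamespace(bid=101.0, ask=101.5)
long_pos = SimpleNamespace(side=SIDE_LONG, priceMean=lambda: 100.0)
short_pos = SimpleNamespace(side=SIDE_SHORT, priceMean=lambda: 100.0)
print(calc_variation(tick, long_pos))    # 1.01  (long positions are marked at the bid)
print(calc_variation(tick, short_pos))   # 1.015 (other sides are marked at the ask)
```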
def _fit_imaging_from(fit: af.Fit, galaxies: List[ag.Galaxy], settings_imaging: aa.SettingsImaging=None, settings_pixelization: aa.SettingsPixelization=None, settings_inversion: aa.SettingsInversion=None, use_preloaded_grid: bool=True, use_hyper_scaling: bool=True) -> FitImaging: '\n Returns a `FitImaging` object from a PyAutoFit database `Fit` object and an instance of galaxies from a non-linear\n search model-fit.\n\n This function adds the `hyper_model_image` and `hyper_galaxy_image_path_dict` to the galaxies before performing the\n fit, if they were used.\n\n Parameters\n ----------\n fit\n A PyAutoFit database Fit object containing the generators of the results of PyAutoGalaxy model-fits.\n galaxies\n A list of galaxies corresponding to a sample of a non-linear search and model-fit.\n\n Returns\n -------\n FitImaging\n The fit to the imaging dataset computed via an instance of galaxies.\n ' imaging = _imaging_from(fit=fit, settings_imaging=settings_imaging) tracer = _tracer_from(fit=fit, galaxies=galaxies) settings_pixelization = (settings_pixelization or fit.value(name='settings_pixelization')) settings_inversion = (settings_inversion or fit.value(name='settings_inversion')) preloads = Preloads(use_w_tilde=False) if use_preloaded_grid: sparse_grids_of_planes = fit.value(name='preload_sparse_grids_of_planes') if (sparse_grids_of_planes is not None): preloads = Preloads(sparse_image_plane_grid_pg_list=sparse_grids_of_planes, use_w_tilde=False) if (len(preloads.sparse_image_plane_grid_pg_list) == 2): if (type(preloads.sparse_image_plane_grid_pg_list[1]) != list): preloads.sparse_image_plane_grid_pg_list[1] = [preloads.sparse_image_plane_grid_pg_list[1]] return FitImaging(dataset=imaging, tracer=tracer, settings_pixelization=settings_pixelization, settings_inversion=settings_inversion, preloads=preloads, use_hyper_scaling=use_hyper_scaling)
-6,554,503,904,327,956,000
Returns a `FitImaging` object from a PyAutoFit database `Fit` object and an instance of galaxies from a non-linear search model-fit. This function adds the `hyper_model_image` and `hyper_galaxy_image_path_dict` to the galaxies before performing the fit, if they were used. Parameters ---------- fit A PyAutoFit database Fit object containing the generators of the results of PyAutoGalaxy model-fits. galaxies A list of galaxies corresponding to a sample of a non-linear search and model-fit. Returns ------- FitImaging The fit to the imaging dataset computed via an instance of galaxies.
autolens/aggregator/fit_imaging.py
_fit_imaging_from
Jammy2211/AutoLens
python
def _fit_imaging_from(fit: af.Fit, galaxies: List[ag.Galaxy], settings_imaging: aa.SettingsImaging=None, settings_pixelization: aa.SettingsPixelization=None, settings_inversion: aa.SettingsInversion=None, use_preloaded_grid: bool=True, use_hyper_scaling: bool=True) -> FitImaging: '\n Returns a `FitImaging` object from a PyAutoFit database `Fit` object and an instance of galaxies from a non-linear\n search model-fit.\n\n This function adds the `hyper_model_image` and `hyper_galaxy_image_path_dict` to the galaxies before performing the\n fit, if they were used.\n\n Parameters\n ----------\n fit\n A PyAutoFit database Fit object containing the generators of the results of PyAutoGalaxy model-fits.\n galaxies\n A list of galaxies corresponding to a sample of a non-linear search and model-fit.\n\n Returns\n -------\n FitImaging\n The fit to the imaging dataset computed via an instance of galaxies.\n ' imaging = _imaging_from(fit=fit, settings_imaging=settings_imaging) tracer = _tracer_from(fit=fit, galaxies=galaxies) settings_pixelization = (settings_pixelization or fit.value(name='settings_pixelization')) settings_inversion = (settings_inversion or fit.value(name='settings_inversion')) preloads = Preloads(use_w_tilde=False) if use_preloaded_grid: sparse_grids_of_planes = fit.value(name='preload_sparse_grids_of_planes') if (sparse_grids_of_planes is not None): preloads = Preloads(sparse_image_plane_grid_pg_list=sparse_grids_of_planes, use_w_tilde=False) if (len(preloads.sparse_image_plane_grid_pg_list) == 2): if (type(preloads.sparse_image_plane_grid_pg_list[1]) != list): preloads.sparse_image_plane_grid_pg_list[1] = [preloads.sparse_image_plane_grid_pg_list[1]] return FitImaging(dataset=imaging, tracer=tracer, settings_pixelization=settings_pixelization, settings_inversion=settings_inversion, preloads=preloads, use_hyper_scaling=use_hyper_scaling)
def __init__(self, aggregator: af.Aggregator, settings_imaging: Optional[aa.SettingsImaging]=None, settings_pixelization: Optional[aa.SettingsPixelization]=None, settings_inversion: Optional[aa.SettingsInversion]=None, use_preloaded_grid: bool=True, use_hyper_scaling: bool=True): '\n Wraps a PyAutoFit aggregator in order to create generators of fits to imaging data, corresponding to the\n results of a non-linear search model-fit.\n ' super().__init__(aggregator=aggregator) self.settings_imaging = settings_imaging self.settings_pixelization = settings_pixelization self.settings_inversion = settings_inversion self.use_preloaded_grid = use_preloaded_grid self.use_hyper_scaling = use_hyper_scaling
-2,135,738,284,995,956,000
Wraps a PyAutoFit aggregator in order to create generators of fits to imaging data, corresponding to the results of a non-linear search model-fit.
autolens/aggregator/fit_imaging.py
__init__
Jammy2211/AutoLens
python
def __init__(self, aggregator: af.Aggregator, settings_imaging: Optional[aa.SettingsImaging]=None, settings_pixelization: Optional[aa.SettingsPixelization]=None, settings_inversion: Optional[aa.SettingsInversion]=None, use_preloaded_grid: bool=True, use_hyper_scaling: bool=True): '\n Wraps a PyAutoFit aggregator in order to create generators of fits to imaging data, corresponding to the\n results of a non-linear search model-fit.\n ' super().__init__(aggregator=aggregator) self.settings_imaging = settings_imaging self.settings_pixelization = settings_pixelization self.settings_inversion = settings_inversion self.use_preloaded_grid = use_preloaded_grid self.use_hyper_scaling = use_hyper_scaling
def make_object_for_gen(self, fit, galaxies) -> FitImaging: '\n Creates a `FitImaging` object from a `ModelInstance` that contains the galaxies of a sample from a non-linear\n search.\n\n Parameters\n ----------\n fit\n A PyAutoFit database Fit object containing the generators of the results of PyAutoGalaxy model-fits.\n galaxies\n A list of galaxies corresponding to a sample of a non-linear search and model-fit.\n\n Returns\n -------\n FitImaging\n A fit to imaging data whose galaxies are a sample of a PyAutoFit non-linear search.\n ' return _fit_imaging_from(fit=fit, galaxies=galaxies, settings_imaging=self.settings_imaging, settings_pixelization=self.settings_pixelization, settings_inversion=self.settings_inversion, use_preloaded_grid=self.use_preloaded_grid, use_hyper_scaling=self.use_hyper_scaling)
-5,418,655,718,074,749,000
Creates a `FitImaging` object from a `ModelInstance` that contains the galaxies of a sample from a non-linear search. Parameters ---------- fit A PyAutoFit database Fit object containing the generators of the results of PyAutoGalaxy model-fits. galaxies A list of galaxies corresponding to a sample of a non-linear search and model-fit. Returns ------- FitImaging A fit to imaging data whose galaxies are a sample of a PyAutoFit non-linear search.
autolens/aggregator/fit_imaging.py
make_object_for_gen
Jammy2211/AutoLens
python
def make_object_for_gen(self, fit, galaxies) -> FitImaging: '\n Creates a `FitImaging` object from a `ModelInstance` that contains the galaxies of a sample from a non-linear\n search.\n\n Parameters\n ----------\n fit\n A PyAutoFit database Fit object containing the generators of the results of PyAutoGalaxy model-fits.\n galaxies\n A list of galaxies corresponding to a sample of a non-linear search and model-fit.\n\n Returns\n -------\n FitImaging\n A fit to imaging data whose galaxies are a sample of a PyAutoFit non-linear search.\n ' return _fit_imaging_from(fit=fit, galaxies=galaxies, settings_imaging=self.settings_imaging, settings_pixelization=self.settings_pixelization, settings_inversion=self.settings_inversion, use_preloaded_grid=self.use_preloaded_grid, use_hyper_scaling=self.use_hyper_scaling)
def _velocity_to_redshift(velocity): '\n Convert a velocity to a relativistic redshift.\n ' beta = (velocity / C_KMS) return (np.sqrt(((1 + beta) / (1 - beta))) - 1)
-4,264,383,338,760,901,600
Convert a velocity to a relativistic redshift.
LI/lib/python3.8/site-packages/astropy/coordinates/spectral_coordinate.py
_velocity_to_redshift
honeybhardwaj/Language_Identification
python
def _velocity_to_redshift(velocity): '\n \n ' beta = (velocity / C_KMS) return (np.sqrt(((1 + beta) / (1 - beta))) - 1)
def _redshift_to_velocity(redshift): '\n Convert a relativistic redshift to a velocity.\n ' zponesq = ((1 + redshift) ** 2) return ((C_KMS * (zponesq - 1)) / (zponesq + 1))
-3,123,915,812,359,021,000
Convert a relativistic redshift to a velocity.
LI/lib/python3.8/site-packages/astropy/coordinates/spectral_coordinate.py
_redshift_to_velocity
honeybhardwaj/Language_Identification
python
def _redshift_to_velocity(redshift): '\n \n ' zponesq = ((1 + redshift) ** 2) return ((C_KMS * (zponesq - 1)) / (zponesq + 1))
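A plain-float sketch of the two helpers above, showing that they are exact inverses of each other; `C_KMS` is assumed to be the speed of light in km/s, matching the module-level constant the originals use.

```python
import numpy as np

C_KMS = 299792.458  # km/s (assumed value of the module-level constant)

def velocity_to_redshift(velocity):
    beta = velocity / C_KMS
    return np.sqrt((1 + beta) / (1 - beta)) - 1

def redshift_to_velocity(redshift):
    zponesq = (1 + redshift) ** 2
    return C_KMS * (zponesq - 1) / (zponesq + 1)

v = 30000.0                                     # km/s, roughly 0.1 c
z = velocity_to_redshift(v)
print(round(z, 5))                              # ~0.10562
print(np.isclose(redshift_to_velocity(z), v))   # True: the round trip recovers v
```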
def _apply_relativistic_doppler_shift(scoord, velocity): '\n Given a `SpectralQuantity` and a velocity, return a new `SpectralQuantity`\n that is Doppler shifted by this amount.\n\n Note that the Doppler shift applied is the full relativistic one, so\n `SpectralQuantity` currently expressed in velocity and not using the\n relativistic convention will temporarily be converted to use the\n relativistic convention while the shift is applied.\n\n Positive velocities are assumed to redshift the spectral quantity,\n while negative velocities blueshift the spectral quantity.\n ' squantity = scoord.view(SpectralQuantity) beta = (velocity / c) doppler_factor = np.sqrt(((1 + beta) / (1 - beta))) if squantity.unit.is_equivalent(u.m): return (squantity * doppler_factor) elif (squantity.unit.is_equivalent(u.Hz) or squantity.unit.is_equivalent(u.eV) or squantity.unit.is_equivalent((1 / u.m))): return (squantity / doppler_factor) elif squantity.unit.is_equivalent(KMS): return (squantity.to(u.Hz) / doppler_factor).to(squantity.unit) else: raise RuntimeError(f'Unexpected units in velocity shift: {squantity.unit}. This should not happen, so please report this in the astropy issue tracker!')
-837,969,382,452,347,500
Given a `SpectralQuantity` and a velocity, return a new `SpectralQuantity` that is Doppler shifted by this amount. Note that the Doppler shift applied is the full relativistic one, so `SpectralQuantity` currently expressed in velocity and not using the relativistic convention will temporarily be converted to use the relativistic convention while the shift is applied. Positive velocities are assumed to redshift the spectral quantity, while negative velocities blueshift the spectral quantity.
LI/lib/python3.8/site-packages/astropy/coordinates/spectral_coordinate.py
_apply_relativistic_doppler_shift
honeybhardwaj/Language_Identification
python
def _apply_relativistic_doppler_shift(scoord, velocity): '\n Given a `SpectralQuantity` and a velocity, return a new `SpectralQuantity`\n that is Doppler shifted by this amount.\n\n Note that the Doppler shift applied is the full relativistic one, so\n `SpectralQuantity` currently expressed in velocity and not using the\n relativistic convention will temporarily be converted to use the\n relativistic convention while the shift is applied.\n\n Positive velocities are assumed to redshift the spectral quantity,\n while negative velocities blueshift the spectral quantity.\n ' squantity = scoord.view(SpectralQuantity) beta = (velocity / c) doppler_factor = np.sqrt(((1 + beta) / (1 - beta))) if squantity.unit.is_equivalent(u.m): return (squantity * doppler_factor) elif (squantity.unit.is_equivalent(u.Hz) or squantity.unit.is_equivalent(u.eV) or squantity.unit.is_equivalent((1 / u.m))): return (squantity / doppler_factor) elif squantity.unit.is_equivalent(KMS): return (squantity.to(u.Hz) / doppler_factor).to(squantity.unit) else: raise RuntimeError(f'Unexpected units in velocity shift: {squantity.unit}. This should not happen, so please report this in the astropy issue tracker!')
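A numeric sketch of the branch logic above for a receding source: wavelength-like quantities are multiplied by the relativistic Doppler factor, while frequency-, energy- and wavenumber-like quantities are divided by it. Plain floats stand in for `SpectralQuantity`; the H-alpha numbers are illustrative only.

```python
import numpy as np

C_KMS = 299792.458                     # km/s
velocity = 1000.0                      # km/s, positive = receding

beta = velocity / C_KMS
doppler_factor = np.sqrt((1 + beta) / (1 - beta))

rest_wavelength_nm = 656.3             # H-alpha
rest_frequency_hz = 4.568e14           # roughly c / 656.3 nm

print(rest_wavelength_nm * doppler_factor)   # > 656.3     -> redshifted wavelength
print(rest_frequency_hz / doppler_factor)    # < 4.568e14  -> lowered frequency
```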
def update_differentials_to_match(original, velocity_reference, preserve_observer_frame=False): '\n Given an original coordinate object, update the differentials so that\n the final coordinate is at the same location as the original coordinate\n but co-moving with the velocity reference object.\n\n If preserve_original_frame is set to True, the resulting object will be in\n the frame of the original coordinate, otherwise it will be in the frame of\n the velocity reference.\n ' if (not velocity_reference.data.differentials): raise ValueError('Reference frame has no velocities') if (('obstime' in velocity_reference.frame_attributes) and hasattr(original, 'obstime')): velocity_reference = velocity_reference.replicate(obstime=original.obstime) original_icrs = original.transform_to(ICRS()) velocity_reference_icrs = velocity_reference.transform_to(ICRS()) differentials = velocity_reference_icrs.data.represent_as(CartesianRepresentation, CartesianDifferential).differentials data_with_differentials = original_icrs.data.represent_as(CartesianRepresentation).with_differentials(differentials) final_icrs = original_icrs.realize_frame(data_with_differentials) if preserve_observer_frame: final = final_icrs.transform_to(original) else: final = final_icrs.transform_to(velocity_reference) return final.replicate(representation_type=CartesianRepresentation, differential_type=CartesianDifferential)
8,652,156,971,554,723,000
Given an original coordinate object, update the differentials so that the final coordinate is at the same location as the original coordinate but co-moving with the velocity reference object. If preserve_original_frame is set to True, the resulting object will be in the frame of the original coordinate, otherwise it will be in the frame of the velocity reference.
LI/lib/python3.8/site-packages/astropy/coordinates/spectral_coordinate.py
update_differentials_to_match
honeybhardwaj/Language_Identification
python
def update_differentials_to_match(original, velocity_reference, preserve_observer_frame=False): '\n Given an original coordinate object, update the differentials so that\n the final coordinate is at the same location as the original coordinate\n but co-moving with the velocity reference object.\n\n If preserve_original_frame is set to True, the resulting object will be in\n the frame of the original coordinate, otherwise it will be in the frame of\n the velocity reference.\n ' if (not velocity_reference.data.differentials): raise ValueError('Reference frame has no velocities') if (('obstime' in velocity_reference.frame_attributes) and hasattr(original, 'obstime')): velocity_reference = velocity_reference.replicate(obstime=original.obstime) original_icrs = original.transform_to(ICRS()) velocity_reference_icrs = velocity_reference.transform_to(ICRS()) differentials = velocity_reference_icrs.data.represent_as(CartesianRepresentation, CartesianDifferential).differentials data_with_differentials = original_icrs.data.represent_as(CartesianRepresentation).with_differentials(differentials) final_icrs = original_icrs.realize_frame(data_with_differentials) if preserve_observer_frame: final = final_icrs.transform_to(original) else: final = final_icrs.transform_to(velocity_reference) return final.replicate(representation_type=CartesianRepresentation, differential_type=CartesianDifferential)
def attach_zero_velocities(coord): '\n Set the differentials to be stationary on a coordinate object.\n ' new_data = coord.cartesian.with_differentials(ZERO_VELOCITIES) return coord.realize_frame(new_data)
624,241,151,493,268,200
Set the differentials to be stationary on a coordinate object.
LI/lib/python3.8/site-packages/astropy/coordinates/spectral_coordinate.py
attach_zero_velocities
honeybhardwaj/Language_Identification
python
def attach_zero_velocities(coord): '\n \n ' new_data = coord.cartesian.with_differentials(ZERO_VELOCITIES) return coord.realize_frame(new_data)
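A hedged sketch of what `attach_zero_velocities()` above accomplishes, written against the public astropy API; the module-level `ZERO_VELOCITIES` constant is assumed to be a zero `CartesianDifferential` like the one built here.

```python
import astropy.units as u
from astropy.coordinates import ICRS, CartesianDifferential

zero_vel = CartesianDifferential([0, 0, 0] * u.km / u.s)   # assumed shape of ZERO_VELOCITIES

coord = ICRS(ra=10 * u.deg, dec=20 * u.deg, distance=5 * u.pc)
new_data = coord.cartesian.with_differentials(zero_vel)
coord_with_vel = coord.realize_frame(new_data)

print('s' in coord_with_vel.data.differentials)   # True: velocities are now defined
```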
@staticmethod def _validate_coordinate(coord, label=''): '\n Checks the type of the frame and whether a velocity differential and a\n distance has been defined on the frame object.\n\n If no distance is defined, the target is assumed to be "really far\n away", and the observer is assumed to be "in the solar system".\n\n Parameters\n ----------\n coord : `~astropy.coordinates.BaseCoordinateFrame`\n The new frame to be used for target or observer.\n label : str, optional\n The name of the object being validated (e.g. \'target\' or \'observer\'),\n which is then used in error messages.\n ' if (coord is None): return if (not issubclass(coord.__class__, BaseCoordinateFrame)): if isinstance(coord, SkyCoord): coord = coord.frame else: raise TypeError(f'{label} must be a SkyCoord or coordinate frame instance') with np.errstate(all='ignore'): distance = getattr(coord, 'distance', None) if ((distance is not None) and (distance.unit.physical_type == 'dimensionless')): coord = SkyCoord(coord, distance=DEFAULT_DISTANCE) warnings.warn(f'Distance on coordinate object is dimensionless, an abritrary distance value of {DEFAULT_DISTANCE} will be set instead.', NoDistanceWarning) if ('s' not in coord.data.differentials): warnings.warn('No velocity defined on frame, assuming {}.'.format(ZERO_VELOCITIES), NoVelocityWarning) coord = attach_zero_velocities(coord) return coord
4,801,924,817,370,988,000
Checks the type of the frame and whether a velocity differential and a distance has been defined on the frame object. If no distance is defined, the target is assumed to be "really far away", and the observer is assumed to be "in the solar system". Parameters ---------- coord : `~astropy.coordinates.BaseCoordinateFrame` The new frame to be used for target or observer. label : str, optional The name of the object being validated (e.g. 'target' or 'observer'), which is then used in error messages.
LI/lib/python3.8/site-packages/astropy/coordinates/spectral_coordinate.py
_validate_coordinate
honeybhardwaj/Language_Identification
python
@staticmethod def _validate_coordinate(coord, label=''): '\n Checks the type of the frame and whether a velocity differential and a\n distance has been defined on the frame object.\n\n If no distance is defined, the target is assumed to be "really far\n away", and the observer is assumed to be "in the solar system".\n\n Parameters\n ----------\n coord : `~astropy.coordinates.BaseCoordinateFrame`\n The new frame to be used for target or observer.\n label : str, optional\n The name of the object being validated (e.g. \'target\' or \'observer\'),\n which is then used in error messages.\n ' if (coord is None): return if (not issubclass(coord.__class__, BaseCoordinateFrame)): if isinstance(coord, SkyCoord): coord = coord.frame else: raise TypeError(f'{label} must be a SkyCoord or coordinate frame instance') with np.errstate(all='ignore'): distance = getattr(coord, 'distance', None) if ((distance is not None) and (distance.unit.physical_type == 'dimensionless')): coord = SkyCoord(coord, distance=DEFAULT_DISTANCE) warnings.warn(f'Distance on coordinate object is dimensionless, an abritrary distance value of {DEFAULT_DISTANCE} will be set instead.', NoDistanceWarning) if ('s' not in coord.data.differentials): warnings.warn('No velocity defined on frame, assuming {}.'.format(ZERO_VELOCITIES), NoVelocityWarning) coord = attach_zero_velocities(coord) return coord
def replicate(self, value=None, unit=None, observer=None, target=None, radial_velocity=None, redshift=None, doppler_convention=None, doppler_rest=None, copy=False): '\n Return a replica of the `SpectralCoord`, optionally changing the\n values or attributes.\n\n Note that no conversion is carried out by this method - this keeps\n all the values and attributes the same, except for the ones explicitly\n passed to this method which are changed.\n\n If ``copy`` is set to `True` then a full copy of the internal arrays\n will be made. By default the replica will use a reference to the\n original arrays when possible to save memory.\n\n Parameters\n ----------\n value : ndarray or `~astropy.units.Quantity` or `SpectralCoord`, optional\n Spectral values, which should be either wavelength, frequency,\n energy, wavenumber, or velocity values.\n unit : str or `~astropy.units.Unit`\n Unit for the given spectral values.\n observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional\n The coordinate (position and velocity) of observer.\n target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional\n The coordinate (position and velocity) of target.\n radial_velocity : `~astropy.units.Quantity`, optional\n The radial velocity of the target with respect to the observer.\n redshift : float, optional\n The relativistic redshift of the target with respect to the observer.\n doppler_rest : `~astropy.units.Quantity`, optional\n The rest value to use when expressing the spectral value as a velocity.\n doppler_convention : str, optional\n The Doppler convention to use when expressing the spectral value as a velocity.\n copy : bool, optional\n If `True`, and ``value`` is not specified, the values are copied to\n the new `SkyCoord` - otherwise a reference to the same values is used.\n\n Returns\n -------\n sc : `SpectralCoord` object\n Replica of this object\n ' if isinstance(value, u.Quantity): if (unit is not None): raise ValueError('Cannot specify value as a Quantity and also specify unit') else: (value, unit) = (value.value, value.unit) value = (value if (value is not None) else self.value) unit = (unit or self.unit) observer = (self._validate_coordinate(observer) or self.observer) target = (self._validate_coordinate(target) or self.target) doppler_convention = (doppler_convention or self.doppler_convention) doppler_rest = (doppler_rest or self.doppler_rest) if copy: value = value.copy() if (((self.observer is None) or (self.target is None)) and (radial_velocity is None) and (redshift is None)): radial_velocity = self.radial_velocity with warnings.catch_warnings(): warnings.simplefilter('ignore', NoVelocityWarning) return self.__class__(value=value, unit=unit, observer=observer, target=target, radial_velocity=radial_velocity, redshift=redshift, doppler_convention=doppler_convention, doppler_rest=doppler_rest, copy=False)
7,753,664,982,849,848,000
Return a replica of the `SpectralCoord`, optionally changing the values or attributes. Note that no conversion is carried out by this method - this keeps all the values and attributes the same, except for the ones explicitly passed to this method which are changed. If ``copy`` is set to `True` then a full copy of the internal arrays will be made. By default the replica will use a reference to the original arrays when possible to save memory. Parameters ---------- value : ndarray or `~astropy.units.Quantity` or `SpectralCoord`, optional Spectral values, which should be either wavelength, frequency, energy, wavenumber, or velocity values. unit : str or `~astropy.units.Unit` Unit for the given spectral values. observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of observer. target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of target. radial_velocity : `~astropy.units.Quantity`, optional The radial velocity of the target with respect to the observer. redshift : float, optional The relativistic redshift of the target with respect to the observer. doppler_rest : `~astropy.units.Quantity`, optional The rest value to use when expressing the spectral value as a velocity. doppler_convention : str, optional The Doppler convention to use when expressing the spectral value as a velocity. copy : bool, optional If `True`, and ``value`` is not specified, the values are copied to the new `SkyCoord` - otherwise a reference to the same values is used. Returns ------- sc : `SpectralCoord` object Replica of this object
LI/lib/python3.8/site-packages/astropy/coordinates/spectral_coordinate.py
replicate
honeybhardwaj/Language_Identification
python
def replicate(self, value=None, unit=None, observer=None, target=None, radial_velocity=None, redshift=None, doppler_convention=None, doppler_rest=None, copy=False): '\n Return a replica of the `SpectralCoord`, optionally changing the\n values or attributes.\n\n Note that no conversion is carried out by this method - this keeps\n all the values and attributes the same, except for the ones explicitly\n passed to this method which are changed.\n\n If ``copy`` is set to `True` then a full copy of the internal arrays\n will be made. By default the replica will use a reference to the\n original arrays when possible to save memory.\n\n Parameters\n ----------\n value : ndarray or `~astropy.units.Quantity` or `SpectralCoord`, optional\n Spectral values, which should be either wavelength, frequency,\n energy, wavenumber, or velocity values.\n unit : str or `~astropy.units.Unit`\n Unit for the given spectral values.\n observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional\n The coordinate (position and velocity) of observer.\n target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional\n The coordinate (position and velocity) of target.\n radial_velocity : `~astropy.units.Quantity`, optional\n The radial velocity of the target with respect to the observer.\n redshift : float, optional\n The relativistic redshift of the target with respect to the observer.\n doppler_rest : `~astropy.units.Quantity`, optional\n The rest value to use when expressing the spectral value as a velocity.\n doppler_convention : str, optional\n The Doppler convention to use when expressing the spectral value as a velocity.\n copy : bool, optional\n If `True`, and ``value`` is not specified, the values are copied to\n the new `SkyCoord` - otherwise a reference to the same values is used.\n\n Returns\n -------\n sc : `SpectralCoord` object\n Replica of this object\n ' if isinstance(value, u.Quantity): if (unit is not None): raise ValueError('Cannot specify value as a Quantity and also specify unit') else: (value, unit) = (value.value, value.unit) value = (value if (value is not None) else self.value) unit = (unit or self.unit) observer = (self._validate_coordinate(observer) or self.observer) target = (self._validate_coordinate(target) or self.target) doppler_convention = (doppler_convention or self.doppler_convention) doppler_rest = (doppler_rest or self.doppler_rest) if copy: value = value.copy() if (((self.observer is None) or (self.target is None)) and (radial_velocity is None) and (redshift is None)): radial_velocity = self.radial_velocity with warnings.catch_warnings(): warnings.simplefilter('ignore', NoVelocityWarning) return self.__class__(value=value, unit=unit, observer=observer, target=target, radial_velocity=radial_velocity, redshift=redshift, doppler_convention=doppler_convention, doppler_rest=doppler_rest, copy=False)
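A hedged usage sketch of `replicate()`: the spectral values stay exactly as they are and only the explicitly passed attributes change, since no conversion is performed. The numbers are illustrative; constructing `SpectralCoord` with a bare `radial_velocity` follows the class's documented keywords.

```python
import astropy.units as u
from astropy.coordinates import SpectralCoord

sc = SpectralCoord(500 * u.nm, radial_velocity=100 * u.km / u.s)
sc2 = sc.replicate(radial_velocity=250 * u.km / u.s)

print(sc2.value, sc2.unit)    # 500.0 nm   -- values and unit untouched
print(sc2.radial_velocity)    # 250.0 km / s -- only the attribute changed
```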
@property def quantity(self): '\n Convert the ``SpectralCoord`` to a `~astropy.units.Quantity`.\n Equivalent to ``self.view(u.Quantity)``.\n\n Returns\n -------\n `~astropy.units.Quantity`\n This object viewed as a `~astropy.units.Quantity`.\n\n ' return self.view(u.Quantity)
1,214,218,226,791,068,700
Convert the ``SpectralCoord`` to a `~astropy.units.Quantity`. Equivalent to ``self.view(u.Quantity)``. Returns ------- `~astropy.units.Quantity` This object viewed as a `~astropy.units.Quantity`.
LI/lib/python3.8/site-packages/astropy/coordinates/spectral_coordinate.py
quantity
honeybhardwaj/Language_Identification
python
@property def quantity(self): '\n Convert the ``SpectralCoord`` to a `~astropy.units.Quantity`.\n Equivalent to ``self.view(u.Quantity)``.\n\n Returns\n -------\n `~astropy.units.Quantity`\n This object viewed as a `~astropy.units.Quantity`.\n\n ' return self.view(u.Quantity)