| code (string, length 82–53.2k) | code_codestyle (int64, 0–721) | style_context (string, length 91–41.9k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
from __future__ import annotations
def __UpperCAmelCase ( UpperCAmelCase, UpperCAmelCase, UpperCAmelCase )-> dict[str, float]:
"""simple docstring"""
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('''One and only on... | 604 | def __UpperCAmelCase ( UpperCAmelCase = 50 )-> int:
"""simple docstring"""
lowercase = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2, 5 ):
for tile_start i... | 604 | 1 |
from __future__ import annotations
from math import pow, sqrt
def UpperCamelCase( lowercase_ , lowercase_ , lowercase_ ) -> dict[str, float]:
'''simple docstring'''
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError("""One and onl... | 161 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCame... | 161 | 1 |
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def __UpperCamelCase ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[Any] ... | 521 |
from collections.abc import Sequence
from queue import Queue
class _lowerCamelCase :
"""simple docstring"""
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_S... | 590 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
UpperCAmelCase = {
'configuration_gpt_neo': ['GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoConfig', 'GPTNeoOnnxConfig'],
}
try:
if no... | 709 | """simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''torch''', '''scipy''']
def __init__( self : List[Any] , *__UpperCamelCase : int , **__UpperCamelCase : Any ) -> Any:
... | 342 | 0 |
import os
from typing import Dict, List, Tuple, TypeVar, Union
_snake_case = TypeVar("T")
_snake_case = Union[List[T], Tuple[T, ...]]
_snake_case = Union[T, List[T], Dict[str, T]]
_snake_case = Union[str, bytes, os.PathLike]
| 500 |
import math
import sys
def A ( _lowerCamelCase ):
'''simple docstring'''
if number != int(_lowerCamelCase ):
raise ValueError("the value of input must be a natural number" )
if number < 0:
raise ValueError("the value of input mu... | 500 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from ... | 703 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer... | 123 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ : Union[str, Any] = logging.get_logger(__name__)
__magic_name__ : List[Any] = {
# See all MEGATRON_BERT models at https://huggingface.co... | 102 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__A : str = {
"configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
"... | 275 | 0 |
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import ... | 702 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
fr... | 504 | 0 |
from __future__ import annotations
def __a ( A__ : list , A__ : int , A__ : int , A__ : int ):
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = input_list[low:mid], input_list[mid : high + 1]
while left... | 16 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ={
"go... | 697 | 0 |
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
snake_case = 3
def UpperCamelCase_ ( SCREAMING_SNAKE_CASE_ ) -> int:
print('Generating primitive root of p' )
while True:
... | 708 |
"""simple docstring"""
from functools import reduce
snake_case = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'... | 406 | 0 |
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def A_ ( snake_case_ : int ):
'''simple docstring'''
# A local function to see if a dot lands in the circle.
def is_in_circle(... | 499 |
"""simple docstring"""
import string
def A_ ( snake_case_ : str ):
'''simple docstring'''
for key in range(len(string.ascii_uppercase ) ):
UpperCamelCase : Optional[int] = """"""
for symbol in message:
if symbol in string.... | 499 | 1 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimens... | 719 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by ... | 322 | 0 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def UpperCam... | 322 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'uw-madison/mra-base-512-4': 'https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json',
}
class UpperCAme... | 322 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def UpperCAmelCase ( A__: Optional[int] , A__: str , ... | 710 |
"""simple docstring"""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttentio... | 263 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__magic_name__ )
class UpperCamelCase__ ( __magic_name__ ):
# `task` is not a ClassVar since we want it t... | 412 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggin... | 412 | 1 |
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def lowerCamelCase__ ( A__ : Tuple , A__ : str , A__ : Any , A__ : Optional[int] ):
'''simple docstring'''
__lowerCamelCase ... | 714 |
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logg... | 80 | 0 |
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
SCREAMING_SNAKE_CASE = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
SCREAMING_SNAKE_CASE = [ord(letter) for let... | 94 |
"""simple docstring"""
from __future__ import annotations
def A ( snake_case__ ):
'''simple docstring'''
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(... | 196 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowerCAmelCase : int = {
"configuration_bridgetower": [
"BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BridgeTowerConfig",
"BridgeTower... | 364 |
import math
def lowerCAmelCase ( _lowerCAmelCase : int ):
"""simple docstring"""
UpperCAmelCase__ = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(_lowerCAmelCase )
def lowerCAmelCase ( _lowerCAmelCase : float = 1... | 364 | 1 |
"""simple docstring"""
__A = [
"""VerificationMode""",
"""Version""",
"""disable_progress_bar""",
"""enable_progress_bar""",
"""is_progress_bar_enabled""",
"""experimental""",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_... | 93 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__a = {
"configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_av... | 374 | 0 |
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
A_ :str = log... | 716 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tok... | 154 | 0 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accele... | 89 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataC... | 89 | 1 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class ... | 719 |
from __future__ import annotations
_A : List[str] = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
... | 189 | 0 |
import requests
from bsa import BeautifulSoup
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> str:
snake_case__ = BeautifulSoup(requests.get(__lowerCAmelCase , params=__lowerCAmelCase ).content , '''html.parser''' )
snake_case... | 33 | """simple docstring"""
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] ):
stooge(__SCREAMING_SNAKE_CASE , 0 , len(__SCREAMING_SNAKE_CASE ) - 1 )
return arr
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_... | 425 | 0 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_... | 607 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_model... | 607 | 1 |
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
UpperCAmelCase_ = 4
UpperCAmelCase_ = 3
class UpperCamelCase__ ( lo... | 458 |
import tensorflow as tf
from ...tf_utils import shape_list
class UpperCamelCase__ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self, snake_case__, snake_case__, snake_case__, snake_case__, snake_case__=1, snake_case__=Fals... | 458 | 1 |
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
_SCREAMING_SNAKE_CASE = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
... | 706 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lower... | 517 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils im... | 620 |
lowercase__ : Dict = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]
def SCREAMING_SNAKE_CASE ( __UpperCamelCase) -> int:
a = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared ... | 515 | 0 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
a__ = logging.get_logger(__name__)
class UpperCAmelCase_ ( lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self , *_a , **_a ) ... | 709 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
a__ = logging.get... | 578 | 0 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
fro... | 86 |
from functools import lru_cache
def _a ( SCREAMING_SNAKE_CASE__ : int ) -> set:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = 2
SCREAMING_SNAKE_CASE__ : Union[str, Any] = set()
while i *... | 663 | 0 |
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimensio... | 648 |
lowerCAmelCase__ = range(2, 2_0 + 1)
lowerCAmelCase__ = [1_0**k for k in range(ks[-1] + 1)]
lowerCAmelCase__ = {}
def lowerCamelCase_ ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : A... | 648 | 1 |
"""simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
snake_case = HfArgumentParser(InitializationArguments)
snake_case = parser.parse_args()
# ... | 103 |
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( _a ):
lowerCAmelCase__ = (DDIMParallelScheduler,)
lowerCAmelCase__ = (('eta', 0.0), ('num_inference_steps', 5_0))
def ... | 46 | 0 |
def snake_case ( snake_case__ :Any) -> str:
assert isinstance(lowerCamelCase__ , lowerCamelCase__), F'''The input value of [n={number}] is not an integer'''
if number == 1:
return 2
elif number < 1:
_A = F'''The input value of [n=... | 715 | from __future__ import annotations
from collections.abc import Callable
def snake_case ( snake_case__ :Callable[[int | float], int | float] , snake_case__ :int | float , snake_case__ :int | float , snake_case__ :int = 100 , ) -> float:
_A =... | 83 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _UpperCAmelCase ( metaclass=_A ):
SCREAMING_SNAKE_CASE_ : Optional[int] = ["torch", "scipy"]
def __init__( self : Union[str, Any] , *A : Li... | 231 |
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def lowercase ( __snake_c... | 231 | 1 |
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSa... | 512 | '''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from di... | 512 | 1 |
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
... | 76 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class lowerCamelCase_ ( lowerCAmelCase__ ):
'''simple docstring'''
... | 639 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logg... | 700 |
"""simple docstring"""
import torch
from diffusers import StableDiffusionPipeline
_lowerCamelCase = '''path-to-your-trained-model'''
_lowerCamelCase = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to('''cuda''')
_lowerCamelCase = '''A photo of sks... | 401 | 0 |
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
lowerCAmelCase__ = '''<<<<<<< This should probably be modified because it mentions: '''
lowerCAm... | 41 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationT... | 41 | 1 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.... | 670 |
def A__ ( lowerCamelCase = 50 ) -> int:
UpperCamelCase_: List[Any] = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ... | 670 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
f... | 99 |
import argparse
import os
import re
import packaging.version
snake_case__ : List[Any] = '''examples/'''
snake_case__ : Union[str, Any] = {
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''... | 392 | 0 |
'''simple docstring'''
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
lowercase_ = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": ... | 709 |
'''simple docstring'''
lowercase_ = 256
# Modulus to hash a string
lowercase_ = 1_000_003
def lowerCAmelCase (__A , __A):
"""simple docstring"""
_a = len(__A)
_a = len(__A)
if p_len > t_len:
return False
_a ... | 352 | 0 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = args.pruning_method
lowercase__ = args.threshold
lowercase__... | 43 |
'''simple docstring'''
UpperCamelCase__ = {
"joule": 1.0,
"kilojoule": 1000,
"megajoule": 1000000,
"gigajoule": 1000000000,
"wattsecond": 1.0,
"watthour": 3600,
"kilowatthour": 3600000,
"newtonmeter": 1.0,
"calorie_nutr": 4186.8,
"kilocalorie_nutr": 4186800.00,
... | 620 | 0 |
"""simple docstring"""
from itertools import permutations
def lowercase (_lowerCAmelCase ):
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
__lowerCAmelCase ... | 710 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffu... | 573 | 0 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
UpperCamelCase = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceeding... | 520 |
"""simple docstring"""
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_... | 560 | 0 |
def UpperCAmelCase ( A__ , A__ , A__ ) -> List[Any]:
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(A__ , n - 1 , A__ ) * a) % mod
else:
_snake_case : List[str] = binary_exponentiation(A__ , ... | 709 |
def UpperCAmelCase ( A__ ) -> list[list[int]]:
_snake_case : List[str] = []
if len(A__ ) == 1:
return [nums.copy()]
for _ in range(len(A__ ) ):
_snake_case : Optional[Any] = nums.pop(0 )
_snake_case : Any = p... | 519 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_se... | 23 | '''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def __UpperCamelCase( _A : Any , _A : List[str]=() , _A : List[str]=None ... | 614 | 0 |
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
... | 715 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCAmelCase_ : Tuple ... | 440 | 0 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ) -> List[str]: # noqa: E741
_lowerCAmelCase : List[str] = len(_lowerCamelCase )
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : List[Any] = [0] * n
_lowerCAmelCase : List[str]... | 213 | """simple docstring"""
import os
import sys
import unittest
_a : Optional[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, creat... | 213 | 1 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class lowerCamelCase__:
UpperCAmelCase__ : int
UpperCAmelCase__ : int
class lowerCamelCase__:
def __init__( self: ... | 80 |
from __future__ import annotations
def lowerCamelCase__ ( A__ : list[int] , A__ : int , A__ : int , A__ : int ):
'''simple docstring'''
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[index... | 80 | 1 |
from math import isclose, sqrt
def UpperCamelCase_ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
_lowerCAmelCase : Tuple = point_y / 4 / point_x
_lowerCAmelCase : Optional[Any] = 2 * normal_gradient / (1 + no... | 424 | from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
snake_case = TypeVar("T")
class __A ( Generic[T] ):
'''simple docstring'''
a_ = 42 # Cache store of keys
a_ = 42 # References of the keys in... | 424 | 1 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_comm... | 720 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __lowercase ( lowercase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = ["image_processor", "tokenizer"]
SCREAMING_SNAKE_CASE = "AutoImageProcessor... | 199 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_lowercase : int = {
"configuration_roberta_prelayernorm": [
"ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCH... | 641 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def _lowerCAmelCase ( UpperCamelCase__: Any ) -> Optional[int]:
... | 641 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import Mod... | 708 |
'''simple docstring'''
a__ : dict[str, float] ={
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.609_344,
"knot": 1.852,
}
a__ : dict[str, float] ={
"km/h": 1.0,
"m/s": 0.277_777_778,
"mph": 0.621_371_192,
"knot": 0.539_956_803,
}
def lowercase__ ( _... | 434 | 0 |
def lowercase__ ( A_: list[list[int | float]] ) -> int:
"""simple docstring"""
__UpperCAmelCase =len(A_ )
__UpperCAmelCase =len(matrix[0] )
__UpperCAmelCase =min(A_ , A_ )
for row in range(A_ ):
# ... | 68 | """simple docstring"""
from collections.abc import Generator
def _UpperCAmelCase ( ):
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ = 0, 1
while True:
lowerCAmelCase__ , lowerCAmelCase__ = b, a + b
yield b
def _UpperCAmelCase ( ... | 644 | 0 |
"""simple docstring"""
import math
def snake_case_ ( A_ : List[Any] = 1_00 ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = sum(i * i for i in range(1, n + 1 ) )
_lowerCamelCase : Optional[int] = int(math.... | 711 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''',
# ... | 598 | 0 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
lowercase : Optional[int] = logging.getLogger(__name__)
@dataclass
class a__ ( UpperCamelCase_ ... | 423 |
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
... | 297 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
... | 712 | import argparse
import json
from tqdm import tqdm
def __magic_name__( ) -> List[str]:
'''simple docstring'''
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--src_path''' , type=__UpperCAmelCase ... | 638 | 0 |
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ) -> int:
return int((input_a, input_a).count(0 ) == 0 )
def _SCREAMING_SNAKE_CASE ( ) -> None:
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and... | 108 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
__a: Tuple = datasets.utils.logging.get_logg... | 108 | 1 |
import os
from distutils.util import strtobool
def UpperCamelCase ( __lowercase : Union[str, Any] ,__lowercase : Optional[Any] ):
'''simple docstring'''
for e in env_keys:
A_ : Optional[int] = int(os.environ.get(a__ ,-1 ) )
if val >= 0:
... | 715 | from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def UpperCamelCase ( ):
'''simple docstring'''
A_ , A_ : Any = 9, 14 # noqa: F841
A_ : str = [
[0, 1, 4],
[0, 7, 8],
... | 70 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
# See all GLPN models at https://hugging... | 181 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_... | 181 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class SCREAMING_SNAKE_CASE__ ( snake... | 701 |
'''simple docstring'''
lowerCAmelCase__ : Any = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
lowerCAmelCase__ : int = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
lowerCAmelCase__ : List[Any] = {
0: "Sunday",
1: "Monday",
2: "Tuesday",
3: "Wednesday",
4: "Thursday",
5: "F... | 329 | 0 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
def _a ( _lowerCamelCase , _lowerCamelCase ) -> bool:
"""simple docstring"""
return (
num != den and num % 10 == den // 10 and (num // 10) /... | 26 |
'''simple docstring'''
import sys
__UpperCamelCase : List[str] = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523... | 448 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class A__ ( __SCREAM... | 715 |
"""simple docstring"""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def _lowercase ( __snake_case ,__snake_case ,__snake_case ) -> T... | 615 | 0 |
"""simple docstring"""
from PIL import Image
def snake_case ( _a: List[Any] )-> Image:
'''simple docstring'''
lowerCamelCase__ = image.size
lowerCamelCase__ = 0
lowerCamelCase__ = image.load()
for i in range(_lower... | 510 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {'vocab_file': 'vocab.json'}
_Uppe... | 459 | 0 |
def __lowerCAmelCase ( snake_case : int , snake_case : int ) -> List[str]:
while a != 0:
__lowerCamelCase: str = b % a, a
return b
def __lowerCAmelCase ( snake_case : int , snake_case : int ) -> Optional... | 704 |
import math
def __lowerCAmelCase ( snake_case : int ) -> bool:
__lowerCamelCase: Dict = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(snake_case )
def __lowerCAmelCase ( snake_case : float = 1 / 12345 ) ->... | 189 | 0 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class _lowerCAmelCase ( nn.Module ):
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAme... | 166 | from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from... | 166 | 1 |
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_lowercase : Dict = object()
# For specifying empty leaf dict `{}`
_lowercase : Any = object()... | 30 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowercase : Optional[Any] = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConf... | 30 | 1 |
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter impo... | 303 |
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_atte... | 123 | 0 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def lowerCAmelCase__ ( _a : str , _a : int=() , _a : List[Any]=None , _a : Lis... | 114 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
lowercase : Optional[Any] = 2_00
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generati... | 114 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_ch... | 46 |
'''simple docstring'''
import random
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = num - 1
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
while s % 2 == 0:
SCREAMING_SNAKE_CASE : List[str] = s // 2
t += 1
for _ in range(5 ):
... | 379 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCAmelCase__ ( un... | 11 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCAmelCase_ : Union[str, Any] = ... | 11 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCamelCase : Tuple = {
"facebook/s2t-wav2vec2-large-en-de": (
"https://hug... | 430 |
'''simple docstring'''
from __future__ import annotations
def __lowerCamelCase ( A__ , A__ , A__ , ) -> tuple[str, float]:
"""simple docstring"""
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('Y... | 430 | 1 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def __lowerCAmelCase ( ) -> List[Any]:
'''simple docstring'''
raise RuntimeError('CUDA out of memory.' )
cl... | 673 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...... | 673 | 1 |
'''simple docstring'''
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def __UpperCame... | 536 |
"""simple docstring"""
import requests
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: str , lowerCamelCase_: str ):
"""simple docstring"""
snake_case : List[str] = {"Content-Type": "application/json"}
snake_case : int ... | 449 | 0 |
import unittest
from transformers import DonutProcessor
_snake_case = 'naver-clova-ix/donut-base'
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase( self ) -> Union[str, Any]:
__UpperCamelCase = DonutProcessor.from_pre... | 706 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM ... | 567 | 0 |
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
fro... | 20 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common... | 20 | 1 |
import numpy as np
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> np.array:
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
... | 684 | 0 |
from __future__ import annotations
def UpperCamelCase ( lowercase_ , lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
print(F'Vertex\tShortest Distance from vertex {src}' )
for i, d in enumerate(lowercase_ ):
print(F'{i}\t\t{d}' )
def UpperCamelCase ... | 12 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by... | 12 | 1 |
def snake_case ( UpperCAmelCase : list ):
if len(UpperCAmelCase ) < 2:
return collection
def circle_sort_util(UpperCAmelCase : list, UpperCAmelCase : int, UpperCAmelCase : int ) -> bool:
A = False
if low == high:
... | 110 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert im... | 110 | 1 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
fro... | 436 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def _UpperCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if not postfix_notation:
return 0
__magic_name__ : Optional[Any] = ... | 436 | 1 |
"""simple docstring"""
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import ja... | 121 | """simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
UpperCAmelCase : Dict = logging.getLogger(__name__)
class __SCREAMING_SNAKE_CASE ( _lowerCAmelCase ... | 121 | 1 |
"""simple docstring"""
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase ) -> tuple[float, float]:
"""simple docstring"""
if not len(__UpperCamelCase ) == len(__UpperCamelCase ) == 3:
raise ValueError("Please enter a valid equation." )
if equationa[0] ==... | 610 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils impo... | 610 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
snake_case : Union[str, Any] = {
'configuration_bridgetower': [
'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Bridg... | 715 |
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def lowercase__ ( __UpperCamelCase : Any , __UpperCamelCase : Any=None ):
'''simple docstring'''
... | 339 | 0 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class __UpperCamelCase ( UpperCamelCase ):
@staticmethod
@abstractmethod
def UpperCAmelCase__ ( UpperCAmelCase : ArgumentParser ) -> Optional[Any]:
... | 553 |
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class __UpperCamelCase :
def __init__( self : Union[str, Any] , UpperCAmelCase : List[Any] ) -> List[str]:
lowerCAmelCase :Dict = str(id_ )
... | 553 | 1 |
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_UpperCAmelCase : List[Any] = re.compile(R"^(?P<major>\d+)" R"\.(?P<minor>\d+)" R"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class lo... | 3 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase : Tuple = logging.get_logger(__name__)
_UpperCAmelCase : Union[str, Any] = {
"facebook... | 3 | 1 |
from __future__ import annotations
def snake_case__ ( SCREAMING_SNAKE_CASE_ : int ):
'''simple docstring'''
lowercase__ : Tuple = 2
lowercase__ : Dict = []
while i * i <= n:
if n % i:
i += 1
else:
... | 164 |
from scipy.stats import spearmanr
import datasets
snake_case_ = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply t... | 164 | 1 |
'''simple docstring'''
import string
def snake_case_ (UpperCamelCase : str ):
'''simple docstring'''
for key in range(len(string.ascii_uppercase ) ):
_a = ''''''
for symbol in message:
if symbol i... | 377 |
'''simple docstring'''
import sys
from collections import defaultdict
class A :
def __init__( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_a = []
def ... | 377 | 1 |
"""simple docstring"""
from math import log
from scipy.constants import Boltzmann, physical_constants
_UpperCamelCase = 300 # TEMPERATURE (unit = K)
def _a ( _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
if donor_conc <= 0:
... | 341 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
fr... | 155 | 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowerCAmelCase : Tuple = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
lowerCAmelCase : Union[str, Any] =... | 146 |
from math import ceil
def lowerCAmelCase ( UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Union[str, Any] = list(range(0 , UpperCamelCase__ ... | 146 | 1 |
import copy
import re
class UpperCamelCase :
lowerCAmelCase : int = """hp"""
lowerCAmelCase : List[str] = {}
lowerCAmelCase : Tuple = None
@classmethod
def __A ( cls , UpperCAmelCase__ , UpperCAmelCase__ ):
... | 491 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_avai... | 34 | 0 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def lowerCAmelCase_ ( ) -> Tuple:
raise RuntimeError('''CUDA out of memory.''' )
... | 369 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ = {
'configuration_informer': [
'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InformerConfig',
... | 369 | 1 |
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtrac... | 62 |
def __lowerCamelCase ( lowerCamelCase__ : int ):
'''simple docstring'''
lowerCamelCase = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def __lowerCamelCase ( lowerCamelCase__ : int = 5000 ):
'''simple docstring'''
lower... | 457 | 0 |
def _lowerCAmelCase ( __magic_name__ :int = 1_0_0 ):
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
... | 715 |
def _lowerCAmelCase ( __magic_name__ :Union[str, Any] , __magic_name__ :int , __magic_name__ :str ):
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(__magic_name__ , n - 1 , __magic_name__ ) * a) % mod
... | 407 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def UpperCAmelCase ( a_ ) -> int:
"""simple doc... | 55 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""facebook/data2vec-vision-base-ft"... | 74 | 0 |
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class ... | 640 |
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
def update_area_of_max_square(_UpperCamelCase , _UpperCamelCase ) -> int:
# BASE CASE
if row >= rows or col >= cols:
... | 640 | 1 |
def _lowerCAmelCase ( __magic_name__ :list ):
if len(lowerCamelCase_ ) <= 1:
return lst
UpperCAmelCase_ = 1
while i < len(lowerCamelCase_ ):
if lst[i - 1] <= lst[i]:
i += 1
else:
UpperCAme... | 121 |
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
requir... | 502 | 0 |
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
lowercase : List[str] = TypeVar('''KT''')
lowercase : Optional[Any] = TypeVar('''VT''')
class UpperCAmelCase_ ( Generic[KT, VT] ):
'''simple docstring'''
... | 114 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : Any = logging.get_logger(__name__)
lowercase : str = {
'''naver-clova-ix/donut-base''': '''https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json''',
# See all Do... | 114 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
class snake_case__ ( snake_case_ ):
def __init__( self , *lowerCamelCase , **lowerC... | 528 | """simple docstring"""
def _lowerCamelCase( a ):
return " ".join(
"".join(word[::-1] ) if len(a ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("""Hey wollef sroirraw"""))
| 528 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase : ... | 630 |
'''simple docstring'''
import pytest
import datasets
# Import fixture modules as plugins
lowerCAmelCase : List[str] = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]
def lowercase (_A , _A ):
... | 630 | 1 |
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_mo... | 576 |
'''simple docstring'''
import math
import sys
import cva
import numpy as np
def __snake_case ( lowercase : np.ndarray , lowercase : float ):
# For applying gaussian function for each element in matrix.
snake_case_ = math.sqrt(lowercase )
snake_case_ = ... | 508 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from tra... | 173 | """simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def UpperCamelCase ( _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ... | 173 | 1 |