
Kumite (ko͞omiˌtā) is the practice of taking techniques learned from Kata and applying them through the act of freestyle sparring.

You can create a new kumite by providing some initial code and, optionally, some test cases. From there, other warriors can spar with you by enhancing, refactoring, and translating your code. There is no limit to how many warriors you can spar with.

A great use for a kumite is to begin an idea for a kata as one. You can collaborate with other code warriors until you have it right, and then convert it into a kata.

Web Scraping
  • from bs4 import BeautifulSoup
    import requests
    import shutil
    import os
    
    
    def get_codewars_stats(username):
        """Scraps, and retrieves Codewars stats of given username."""
    
        output = f'{username}\'s Codewars stats:\n'
        source = requests.get(f'https://www.codewars.com/users/{username}', stream=True)
    
        # Verify request status. Checking only for 404 would miss a wide range of other failed connections.
        if source.status_code == 200:
            soup = BeautifulSoup(source.text, 'html.parser')
            stat_info = soup.find_all('div', class_='stat')
            # stat_info[5] is the "Profiles" entry, which is an image rather than text, so it is sliced out here.
            important_values = [info.text for info in stat_info[:5] + stat_info[6:]]
    
            # Get URL of the user's avatar/profile pic
            img_url = soup.find('figure').find_next('img')['src']
    
            # Get image_url requests:
            img_source = requests.get(img_url, stream=True)
    
            # The filepath where data will be saved:
            filepath = os.path.join(os.getcwd(), 'CodeWars')
    
            # Make Codewars directory if it does not exist:
            if not os.path.isdir(filepath):
                os.mkdir(filepath)
    
            with open(os.path.join(filepath, username + '.jpg'), 'wb') as img_obj:
                # Save user's avatar/profile pic:
                img_source.raw.decode_content = True
                shutil.copyfileobj(img_source.raw, img_obj)
                print('Profile pic has been downloaded')
    
            with open(os.path.join(filepath, 'codewars_stats.txt'), 'w', encoding='utf-8') as file_obj:
                # Save user's Codewars stats:
                for item in important_values:
                    file_obj.write(item + '\n')
                print('Codewars stats have been successfully downloaded')
    
            output += '\n\t'.join(important_values)
            return output
    
        else:
            return 'Something went wrong, enter a valid Codewars username.'
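A minimal way to try the scraper, assuming the profile page still uses the markup the parser expects (the username below is purely illustrative):

    # 'some_user' is a hypothetical username; substitute any existing Codewars profile.
    result = get_codewars_stats('some_user')
    print(result)
    # On success this also saves some_user.jpg and codewars_stats.txt under ./CodeWars;
    # otherwise it returns the error message string.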
Fundamentals
Arrays
Strings
  • from dataclasses import dataclass, field
    
    
    @dataclass
    class Dinosaur:
        name: str
        meat_eater: bool = field(default=False)
    
        @staticmethod
        def find_trex(lst):
            for dinosaur in lst:
                if isinstance(dinosaur, Tyrannosaurus) and dinosaur.meat_eater and dinosaur.name == 'tyrannosaurus':
                    return True
            return False
    
    
    @dataclass
    class Tyrannosaurus(Dinosaur):
        meat_eater: bool = field(default=True)
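A quick usage sketch of the classes above (the dinosaur instances are just illustrative):

    herd = [Dinosaur('triceratops'), Tyrannosaurus('tyrannosaurus')]
    print(Dinosaur.find_trex(herd))                        # True
    print(Dinosaur.find_trex([Dinosaur('stegosaurus')]))   # False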
Fundamentals
  • def hello_message():
        t = str.maketrans('_mag', ' Wld')
        return ''.join(
            hello_message
                .__name__
                .capitalize()
                .translate(t)[:-1]
                .replace('ess', 'or')
                + '!'
        )
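Tracing it by hand: the function's own name, 'hello_message', is capitalized, run through the translation table ('_mag' → ' Wld'), trimmed by one character, and patched with the 'ess' → 'or' replacement, yielding the classic greeting:

    print(hello_message())  # Hello World!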
    
Fundamentals
Strings
  • digest = lambda s: ' '.join(s)
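The lambda simply spreads the characters of its argument out with spaces, for example:

    print(digest('Codewars'))  # C o d e w a r s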
Algorithms
Fundamentals
  • collatz = lambda n: collatz([n//2, 3*n+1][n%2]) + 1 if n > 1 else 0
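The lambda counts how many Collatz steps it takes to reach 1, recursing on n//2 when n is even and on 3*n + 1 when n is odd, for example:

    print(collatz(6))  # 8   (6 → 3 → 10 → 5 → 16 → 8 → 4 → 2 → 1)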
    